repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
ZolaIII/android_kernel_synopsis_deprecated | drivers/misc/eeprom/at24.c | 8254 | 19325 | /*
* at24.c - handle most I2C EEPROMs
*
* Copyright (C) 2005-2007 David Brownell
* Copyright (C) 2008 Wolfram Sang, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/mod_devicetable.h>
#include <linux/log2.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/of.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
/*
* I2C EEPROMs from most vendors are inexpensive and mostly interchangeable.
* Differences between different vendor product lines (like Atmel AT24C or
* MicroChip 24LC, etc) won't much matter for typical read/write access.
* There are also I2C RAM chips, likewise interchangeable. One example
* would be the PCF8570, which acts like a 24c02 EEPROM (256 bytes).
*
* However, misconfiguration can lose data. "Set 16-bit memory address"
* to a part with 8-bit addressing will overwrite data. Writing with too
* big a page size also loses data. And it's not safe to assume that the
* conventional addresses 0x50..0x57 only hold eeproms; a PCF8563 RTC
* uses 0x51, for just one example.
*
* Accordingly, explicit board-specific configuration data should be used
* in almost all cases. (One partial exception is an SMBus used to access
* "SPD" data for DRAM sticks. Those only use 24c02 EEPROMs.)
*
* So this driver uses "new style" I2C driver binding, expecting to be
* told what devices exist. That may be in arch/X/mach-Y/board-Z.c or
* similar kernel-resident tables; or, configuration data coming from
* a bootloader.
*
* Other than binding model, current differences from "eeprom" driver are
* that this one handles write access and isn't restricted to 24c02 devices.
* It also handles larger devices (32 kbit and up) with two-byte addresses,
* which won't work on pure SMBus systems.
*/
/* Per-device driver state, allocated in at24_probe(). */
struct at24_data {
        struct at24_platform_data chip; /* chip config: byte_len, flags, page_size */
        struct memory_accessor macc;    /* in-kernel read/write access hooks */
        int use_smbus;                  /* 0 = raw I2C, else I2C_SMBUS_* fallback mode */

        /*
         * Lock protects against activities from other Linux tasks,
         * but not from changes by other I2C masters.
         */
        struct mutex lock;
        struct bin_attribute bin;       /* sysfs "eeprom" binary file */

        u8 *writebuf;                   /* scratch: address byte(s) + up to one page */
        unsigned write_max;             /* largest single write (page/io_limit bounded) */
        unsigned num_addresses;         /* I2C addresses this chip occupies */

        /*
         * Some chips tie up multiple I2C addresses; dummy devices reserve
         * them for us, and we'll use them with SMBus calls.
         */
        struct i2c_client *client[];
};
/*
* This parameter is to help this driver avoid blocking other drivers out
* of I2C for potentially troublesome amounts of time. With a 100 kHz I2C
* clock, one 256 byte read takes about 1/43 second which is excessive;
* but the 1/170 second it takes at 400 kHz may be quite reasonable; and
* at 1 MHz (Fm+) a 1/430 second delay could easily be invisible.
*
* This value is forced to be a power of two so that writes align on pages.
*/
static unsigned io_limit = 128;
module_param(io_limit, uint, 0);
MODULE_PARM_DESC(io_limit, "Maximum bytes per I/O (default 128)");
/*
* Specs often allow 5 msec for a page write, sometimes 20 msec;
* it's important to recover from write timeouts.
*/
static unsigned write_timeout = 25;
module_param(write_timeout, uint, 0);
MODULE_PARM_DESC(write_timeout, "Time (in ms) to try writes (default 25)");
#define AT24_SIZE_BYTELEN 5
#define AT24_SIZE_FLAGS 8
#define AT24_BITMASK(x) (BIT(x) - 1)
/* create non-zero magic value for given eeprom parameters */
#define AT24_DEVICE_MAGIC(_len, _flags) \
((1 << AT24_SIZE_FLAGS | (_flags)) \
<< AT24_SIZE_BYTELEN | ilog2(_len))
static const struct i2c_device_id at24_ids[] = {
/* needs 8 addresses as A0-A2 are ignored */
{ "24c00", AT24_DEVICE_MAGIC(128 / 8, AT24_FLAG_TAKE8ADDR) },
/* old variants can't be handled with this generic entry! */
{ "24c01", AT24_DEVICE_MAGIC(1024 / 8, 0) },
{ "24c02", AT24_DEVICE_MAGIC(2048 / 8, 0) },
/* spd is a 24c02 in memory DIMMs */
{ "spd", AT24_DEVICE_MAGIC(2048 / 8,
AT24_FLAG_READONLY | AT24_FLAG_IRUGO) },
{ "24c04", AT24_DEVICE_MAGIC(4096 / 8, 0) },
/* 24rf08 quirk is handled at i2c-core */
{ "24c08", AT24_DEVICE_MAGIC(8192 / 8, 0) },
{ "24c16", AT24_DEVICE_MAGIC(16384 / 8, 0) },
{ "24c32", AT24_DEVICE_MAGIC(32768 / 8, AT24_FLAG_ADDR16) },
{ "24c64", AT24_DEVICE_MAGIC(65536 / 8, AT24_FLAG_ADDR16) },
{ "24c128", AT24_DEVICE_MAGIC(131072 / 8, AT24_FLAG_ADDR16) },
{ "24c256", AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16) },
{ "24c512", AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16) },
{ "24c1024", AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16) },
{ "at24", 0 },
{ /* END OF LIST */ }
};
MODULE_DEVICE_TABLE(i2c, at24_ids);
/*-------------------------------------------------------------------------*/
/*
 * Map a linear EEPROM offset onto the i2c_client that owns it.
 *
 * Chips spanning several I2C addresses expose 256 bytes (or 64 KiB with
 * 16-bit addressing) per slave address; select the matching (dummy)
 * client and reduce *offset to the in-client byte offset. Offset range
 * checks are assumed to have happened at the sysfs layer already.
 */
static struct i2c_client *at24_translate_offset(struct at24_data *at24,
                unsigned *offset)
{
        unsigned index;

        if (at24->chip.flags & AT24_FLAG_ADDR16) {
                /* 16-bit addressing: 64 KiB window per slave address */
                index = *offset >> 16;
                *offset &= 0xffff;
        } else {
                /* 8-bit addressing: 256-byte window per slave address */
                index = *offset >> 8;
                *offset &= 0xff;
        }

        return at24->client[index];
}
/*
 * Read one chunk (at most io_limit bytes) starting at @offset into @buf.
 * Returns the number of bytes actually read -- possibly fewer than
 * @count when clamped by the SMBus fallback mode -- or a negative errno
 * (-ETIMEDOUT when the chip stays busy past write_timeout).
 */
static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
                unsigned offset, size_t count)
{
        struct i2c_msg msg[2];
        u8 msgbuf[2];
        struct i2c_client *client;
        unsigned long timeout, read_time;
        int status, i;

        memset(msg, 0, sizeof(msg));

        /*
         * REVISIT some multi-address chips don't rollover page reads to
         * the next slave address, so we may need to truncate the count.
         * Those chips might need another quirk flag.
         *
         * If the real hardware used four adjacent 24c02 chips and that
         * were misconfigured as one 24c08, that would be a similar effect:
         * one "eeprom" file not four, but larger reads would fail when
         * they crossed certain pages.
         */

        /*
         * Slave address and byte offset derive from the offset. Always
         * set the byte address; on a multi-master board, another master
         * may have changed the chip's "current" address pointer.
         */
        client = at24_translate_offset(at24, &offset);

        if (count > io_limit)
                count = io_limit;

        /* Clamp the transfer size to what the chosen access method can do. */
        switch (at24->use_smbus) {
        case I2C_SMBUS_I2C_BLOCK_DATA:
                /* Smaller eeproms can work given some SMBus extension calls */
                if (count > I2C_SMBUS_BLOCK_MAX)
                        count = I2C_SMBUS_BLOCK_MAX;
                break;
        case I2C_SMBUS_WORD_DATA:
                count = 2;
                break;
        case I2C_SMBUS_BYTE_DATA:
                count = 1;
                break;
        default:
                /*
                 * When we have a better choice than SMBus calls, use a
                 * combined I2C message. Write address; then read up to
                 * io_limit data bytes. Note that read page rollover helps us
                 * here (unlike writes). msgbuf is u8 and will cast to our
                 * needs.
                 */
                i = 0;
                if (at24->chip.flags & AT24_FLAG_ADDR16)
                        msgbuf[i++] = offset >> 8;
                msgbuf[i++] = offset;

                msg[0].addr = client->addr;
                msg[0].buf = msgbuf;
                msg[0].len = i;

                msg[1].addr = client->addr;
                msg[1].flags = I2C_M_RD;
                msg[1].buf = buf;
                msg[1].len = count;
        }

        /*
         * Reads fail if the previous write didn't complete yet. We may
         * loop a few times until this one succeeds, waiting at least
         * long enough for one entire page write to work.
         */
        timeout = jiffies + msecs_to_jiffies(write_timeout);
        do {
                read_time = jiffies;
                switch (at24->use_smbus) {
                case I2C_SMBUS_I2C_BLOCK_DATA:
                        status = i2c_smbus_read_i2c_block_data(client, offset,
                                        count, buf);
                        break;
                case I2C_SMBUS_WORD_DATA:
                        /* little-endian word comes back as an int */
                        status = i2c_smbus_read_word_data(client, offset);
                        if (status >= 0) {
                                buf[0] = status & 0xff;
                                buf[1] = status >> 8;
                                status = count;
                        }
                        break;
                case I2C_SMBUS_BYTE_DATA:
                        status = i2c_smbus_read_byte_data(client, offset);
                        if (status >= 0) {
                                buf[0] = status;
                                status = count;
                        }
                        break;
                default:
                        /* combined write-address + read transfer */
                        status = i2c_transfer(client->adapter, msg, 2);
                        if (status == 2)
                                status = count;
                }
                dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n",
                                count, offset, status, jiffies);

                if (status == count)
                        return count;

                /* REVISIT: at HZ=100, this is sloooow */
                msleep(1);
        } while (time_before(read_time, timeout));

        return -ETIMEDOUT;
}
/*
 * Read @count bytes at @off, looping over io_limit-sized chunks.
 * Serialized against other accesses from this host via at24->lock
 * (other I2C masters are not excluded).
 */
static ssize_t at24_read(struct at24_data *at24,
                char *buf, loff_t off, size_t count)
{
        ssize_t total = 0;

        if (unlikely(!count))
                return count;

        /*
         * Read data from chip, protecting against concurrent updates
         * from this host, but not from other I2C masters.
         */
        mutex_lock(&at24->lock);

        for (;;) {
                ssize_t chunk;

                if (!count)
                        break;

                chunk = at24_eeprom_read(at24, buf, off, count);
                if (chunk <= 0) {
                        /* report the error only if nothing was read yet */
                        if (total == 0)
                                total = chunk;
                        break;
                }
                buf += chunk;
                off += chunk;
                count -= chunk;
                total += chunk;
        }

        mutex_unlock(&at24->lock);

        return total;
}
/* sysfs read entry point for the "eeprom" binary attribute. */
static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj,
                struct bin_attribute *attr,
                char *buf, loff_t off, size_t count)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct at24_data *at24 = dev_get_drvdata(dev);

        return at24_read(at24, buf, off, count);
}
/*
 * Note that if the hardware write-protect pin is pulled high, the whole
 * chip is normally write protected. But there are plenty of product
 * variants here, including OTP fuses and partial chip protect.
 *
 * We only use page mode writes; the alternative is sloooow. This routine
 * writes at most one page.
 *
 * Returns the number of bytes written -- possibly fewer than @count when
 * clamped to write_max or to the page boundary -- or a negative errno
 * (-ETIMEDOUT when the chip stays busy past write_timeout).
 */
static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
                unsigned offset, size_t count)
{
        struct i2c_client *client;
        struct i2c_msg msg;
        ssize_t status;
        unsigned long timeout, write_time;
        unsigned next_page;

        /* Get corresponding I2C address and adjust offset */
        client = at24_translate_offset(at24, &offset);

        /* write_max is at most a page */
        if (count > at24->write_max)
                count = at24->write_max;

        /* Never roll over backwards, to the start of this page */
        next_page = roundup(offset + 1, at24->chip.page_size);
        if (offset + count > next_page)
                count = next_page - offset;

        /* If we'll use I2C calls for I/O, set up the message */
        if (!at24->use_smbus) {
                int i = 0;

                msg.addr = client->addr;
                msg.flags = 0;

                /* msg.buf is u8 and casts will mask the values */
                msg.buf = at24->writebuf;
                if (at24->chip.flags & AT24_FLAG_ADDR16)
                        msg.buf[i++] = offset >> 8;
                msg.buf[i++] = offset;
                memcpy(&msg.buf[i], buf, count);
                msg.len = i + count;
        }

        /*
         * Writes fail if the previous one didn't complete yet. We may
         * loop a few times until this one succeeds, waiting at least
         * long enough for one entire page write to work.
         */
        timeout = jiffies + msecs_to_jiffies(write_timeout);
        do {
                write_time = jiffies;
                if (at24->use_smbus) {
                        status = i2c_smbus_write_i2c_block_data(client,
                                        offset, count, buf);
                        if (status == 0)
                                status = count;
                } else {
                        status = i2c_transfer(client->adapter, &msg, 1);
                        if (status == 1)
                                status = count;
                }
                dev_dbg(&client->dev, "write %zu@%d --> %zd (%ld)\n",
                                count, offset, status, jiffies);

                if (status == count)
                        return count;

                /* REVISIT: at HZ=100, this is sloooow */
                msleep(1);
        } while (time_before(write_time, timeout));

        return -ETIMEDOUT;
}
/*
 * Write @count bytes at @off, looping over at most page-sized chunks.
 * Serialized against other accesses from this host via at24->lock
 * (other I2C masters are not excluded).
 */
static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
                size_t count)
{
        ssize_t total = 0;

        if (unlikely(!count))
                return count;

        /*
         * Write data to chip, protecting against concurrent updates
         * from this host, but not from other I2C masters.
         */
        mutex_lock(&at24->lock);

        for (;;) {
                ssize_t chunk;

                if (!count)
                        break;

                chunk = at24_eeprom_write(at24, buf, off, count);
                if (chunk <= 0) {
                        /* report the error only if nothing was written yet */
                        if (total == 0)
                                total = chunk;
                        break;
                }
                buf += chunk;
                off += chunk;
                count -= chunk;
                total += chunk;
        }

        mutex_unlock(&at24->lock);

        return total;
}
/* sysfs write entry point for the "eeprom" binary attribute. */
static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
                struct bin_attribute *attr,
                char *buf, loff_t off, size_t count)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct at24_data *at24 = dev_get_drvdata(dev);

        return at24_write(at24, buf, off, count);
}
/*-------------------------------------------------------------------------*/
/*
* This lets other kernel code access the eeprom data. For example, it
* might hold a board's Ethernet address, or board-specific calibration
* data generated on the manufacturing floor.
*/
/* memory_accessor read hook: recover our state from @macc and delegate. */
static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf,
                off_t offset, size_t count)
{
        struct at24_data *at24 = container_of(macc, struct at24_data, macc);

        return at24_read(at24, buf, offset, count);
}
/* memory_accessor write hook: recover our state from @macc and delegate. */
static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf,
                off_t offset, size_t count)
{
        struct at24_data *at24 = container_of(macc, struct at24_data, macc);

        return at24_write(at24, buf, offset, count);
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_OF
/*
 * Merge devicetree properties into @chip: "read-only" sets
 * AT24_FLAG_READONLY, "pagesize" overrides chip->page_size.
 * No-op when the client has no OF node.
 */
static void at24_get_ofdata(struct i2c_client *client,
                struct at24_platform_data *chip)
{
        const __be32 *val;
        struct device_node *node = client->dev.of_node;

        if (node) {
                if (of_get_property(node, "read-only", NULL))
                        chip->flags |= AT24_FLAG_READONLY;
                val = of_get_property(node, "pagesize", NULL);
                if (val)
                        chip->page_size = be32_to_cpup(val);
        }
}
#else
/* Stub used when devicetree support is compiled out. */
static void at24_get_ofdata(struct i2c_client *client,
                struct at24_platform_data *chip)
{ }
#endif /* CONFIG_OF */
static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct at24_platform_data chip;
bool writable;
int use_smbus = 0;
struct at24_data *at24;
int err;
unsigned i, num_addresses;
kernel_ulong_t magic;
if (client->dev.platform_data) {
chip = *(struct at24_platform_data *)client->dev.platform_data;
} else {
if (!id->driver_data) {
err = -ENODEV;
goto err_out;
}
magic = id->driver_data;
chip.byte_len = BIT(magic & AT24_BITMASK(AT24_SIZE_BYTELEN));
magic >>= AT24_SIZE_BYTELEN;
chip.flags = magic & AT24_BITMASK(AT24_SIZE_FLAGS);
/*
* This is slow, but we can't know all eeproms, so we better
* play safe. Specifying custom eeprom-types via platform_data
* is recommended anyhow.
*/
chip.page_size = 1;
/* update chipdata if OF is present */
at24_get_ofdata(client, &chip);
chip.setup = NULL;
chip.context = NULL;
}
if (!is_power_of_2(chip.byte_len))
dev_warn(&client->dev,
"byte_len looks suspicious (no power of 2)!\n");
if (!chip.page_size) {
dev_err(&client->dev, "page_size must not be 0!\n");
err = -EINVAL;
goto err_out;
}
if (!is_power_of_2(chip.page_size))
dev_warn(&client->dev,
"page_size looks suspicious (no power of 2)!\n");
/* Use I2C operations unless we're stuck with SMBus extensions. */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
if (chip.flags & AT24_FLAG_ADDR16) {
err = -EPFNOSUPPORT;
goto err_out;
}
if (i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
use_smbus = I2C_SMBUS_I2C_BLOCK_DATA;
} else if (i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_WORD_DATA)) {
use_smbus = I2C_SMBUS_WORD_DATA;
} else if (i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
use_smbus = I2C_SMBUS_BYTE_DATA;
} else {
err = -EPFNOSUPPORT;
goto err_out;
}
}
if (chip.flags & AT24_FLAG_TAKE8ADDR)
num_addresses = 8;
else
num_addresses = DIV_ROUND_UP(chip.byte_len,
(chip.flags & AT24_FLAG_ADDR16) ? 65536 : 256);
at24 = kzalloc(sizeof(struct at24_data) +
num_addresses * sizeof(struct i2c_client *), GFP_KERNEL);
if (!at24) {
err = -ENOMEM;
goto err_out;
}
mutex_init(&at24->lock);
at24->use_smbus = use_smbus;
at24->chip = chip;
at24->num_addresses = num_addresses;
/*
* Export the EEPROM bytes through sysfs, since that's convenient.
* By default, only root should see the data (maybe passwords etc)
*/
sysfs_bin_attr_init(&at24->bin);
at24->bin.attr.name = "eeprom";
at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR;
at24->bin.read = at24_bin_read;
at24->bin.size = chip.byte_len;
at24->macc.read = at24_macc_read;
writable = !(chip.flags & AT24_FLAG_READONLY);
if (writable) {
if (!use_smbus || i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
unsigned write_max = chip.page_size;
at24->macc.write = at24_macc_write;
at24->bin.write = at24_bin_write;
at24->bin.attr.mode |= S_IWUSR;
if (write_max > io_limit)
write_max = io_limit;
if (use_smbus && write_max > I2C_SMBUS_BLOCK_MAX)
write_max = I2C_SMBUS_BLOCK_MAX;
at24->write_max = write_max;
/* buffer (data + address at the beginning) */
at24->writebuf = kmalloc(write_max + 2, GFP_KERNEL);
if (!at24->writebuf) {
err = -ENOMEM;
goto err_struct;
}
} else {
dev_warn(&client->dev,
"cannot write due to controller restrictions.");
}
}
at24->client[0] = client;
/* use dummy devices for multiple-address chips */
for (i = 1; i < num_addresses; i++) {
at24->client[i] = i2c_new_dummy(client->adapter,
client->addr + i);
if (!at24->client[i]) {
dev_err(&client->dev, "address 0x%02x unavailable\n",
client->addr + i);
err = -EADDRINUSE;
goto err_clients;
}
}
err = sysfs_create_bin_file(&client->dev.kobj, &at24->bin);
if (err)
goto err_clients;
i2c_set_clientdata(client, at24);
dev_info(&client->dev, "%zu byte %s EEPROM, %s, %u bytes/write\n",
at24->bin.size, client->name,
writable ? "writable" : "read-only", at24->write_max);
if (use_smbus == I2C_SMBUS_WORD_DATA ||
use_smbus == I2C_SMBUS_BYTE_DATA) {
dev_notice(&client->dev, "Falling back to %s reads, "
"performance will suffer\n", use_smbus ==
I2C_SMBUS_WORD_DATA ? "word" : "byte");
}
/* export data to kernel code */
if (chip.setup)
chip.setup(&at24->macc, chip.context);
return 0;
err_clients:
for (i = 1; i < num_addresses; i++)
if (at24->client[i])
i2c_unregister_device(at24->client[i]);
kfree(at24->writebuf);
err_struct:
kfree(at24);
err_out:
dev_dbg(&client->dev, "probe error %d\n", err);
return err;
}
/* Undo at24_probe(): remove the sysfs file, drop dummy clients, free state. */
static int __devexit at24_remove(struct i2c_client *client)
{
        struct at24_data *at24;
        int i;

        at24 = i2c_get_clientdata(client);
        sysfs_remove_bin_file(&client->dev.kobj, &at24->bin);

        /* client[0] is the real device; only the dummies are unregistered */
        for (i = 1; i < at24->num_addresses; i++)
                i2c_unregister_device(at24->client[i]);

        kfree(at24->writebuf);
        kfree(at24);
        return 0;
}
/*-------------------------------------------------------------------------*/
/* I2C driver glue: probe/remove callbacks plus the device-id table above. */
static struct i2c_driver at24_driver = {
        .driver = {
                .name = "at24",
                .owner = THIS_MODULE,
        },
        .probe = at24_probe,
        .remove = __devexit_p(at24_remove),
        .id_table = at24_ids,
};
/* Validate and normalize module parameters, then register the driver. */
static int __init at24_init(void)
{
        if (!io_limit) {
                pr_err("at24: io_limit must not be 0!\n");
                return -EINVAL;
        }

        /* Force a power of two so chunked writes stay page-aligned. */
        io_limit = rounddown_pow_of_two(io_limit);
        return i2c_add_driver(&at24_driver);
}
/* Module unload: unregister the I2C driver. */
static void __exit at24_exit(void)
{
        i2c_del_driver(&at24_driver);
}
module_exit(at24_exit);
MODULE_DESCRIPTION("Driver for most I2C EEPROMs");
MODULE_AUTHOR("David Brownell and Wolfram Sang");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ms705/linux | drivers/pcmcia/pxa2xx_colibri.c | 9790 | 4644 | /*
* linux/drivers/pcmcia/pxa2xx_colibri.c
*
* Driver for Toradex Colibri PXA270 CF socket
*
* Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
#include "soc_common.h"
#define COLIBRI270_RESET_GPIO 53
#define COLIBRI270_PPEN_GPIO 107
#define COLIBRI270_BVD1_GPIO 83
#define COLIBRI270_BVD2_GPIO 82
#define COLIBRI270_DETECT_GPIO 84
#define COLIBRI270_READY_GPIO 1
#define COLIBRI320_RESET_GPIO 77
#define COLIBRI320_PPEN_GPIO 57
#define COLIBRI320_BVD1_GPIO 53
#define COLIBRI320_BVD2_GPIO 79
#define COLIBRI320_DETECT_GPIO 81
#define COLIBRI320_READY_GPIO 29
/* Indices into colibri_pcmcia_gpios[] below. */
enum {
        DETECT = 0,     /* card detect input */
        READY = 1,      /* ready/IRQ input */
        BVD1 = 2,       /* battery voltage detect 1 input */
        BVD2 = 3,       /* battery voltage detect 2 input */
        PPEN = 4,       /* power enable output, starts low */
        RESET = 5,      /* reset output, starts high (asserted) */
};

/* Contents of this array are configured on-the-fly in init function */
static struct gpio colibri_pcmcia_gpios[] = {
        { 0, GPIOF_IN, "PCMCIA Detect" },
        { 0, GPIOF_IN, "PCMCIA Ready" },
        { 0, GPIOF_IN, "PCMCIA BVD1" },
        { 0, GPIOF_IN, "PCMCIA BVD2" },
        { 0, GPIOF_INIT_LOW, "PCMCIA PPEN" },
        { 0, GPIOF_INIT_HIGH,"PCMCIA Reset" },
};
/*
 * Claim all socket GPIOs and wire the READY/DETECT lines up as the
 * socket's PCI and card-detect interrupts. Returns 0 or a negative
 * errno from gpio_request_array().
 */
static int colibri_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
        int err = gpio_request_array(colibri_pcmcia_gpios,
                        ARRAY_SIZE(colibri_pcmcia_gpios));

        if (err)
                return err;

        skt->socket.pci_irq = gpio_to_irq(colibri_pcmcia_gpios[READY].gpio);
        skt->stat[SOC_STAT_CD].irq = gpio_to_irq(colibri_pcmcia_gpios[DETECT].gpio);
        skt->stat[SOC_STAT_CD].name = "PCMCIA CD";

        return 0;
}
/* Release the GPIOs claimed by colibri_pcmcia_hw_init(). */
static void colibri_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
        gpio_free_array(colibri_pcmcia_gpios,
                        ARRAY_SIZE(colibri_pcmcia_gpios));
}
/*
 * Sample the socket status GPIO lines into @state. The voltage-sense
 * bits are hard-wired: 3.3V reported present, X.XV reported absent.
 */
static void colibri_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
                struct pcmcia_state *state)
{
        const struct gpio *pins = colibri_pcmcia_gpios;

        state->detect = gpio_get_value(pins[DETECT].gpio) ? 1 : 0;
        state->ready  = gpio_get_value(pins[READY].gpio) ? 1 : 0;
        state->bvd1   = gpio_get_value(pins[BVD1].gpio) ? 1 : 0;
        state->bvd2   = gpio_get_value(pins[BVD2].gpio) ? 1 : 0;

        state->vs_3v = 1;
        state->vs_Xv = 0;
}
/*
 * Drive the power-enable and reset outputs from the requested socket
 * state. PPEN goes low exactly when Vcc is 3.3V with Vpp below 5.0V
 * (presumably an active-low power enable -- confirm against the board
 * schematic); RESET follows the SS_RESET flag. Always succeeds.
 */
static int
colibri_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
                const socket_state_t *state)
{
        gpio_set_value(colibri_pcmcia_gpios[PPEN].gpio,
                        !(state->Vcc == 33 && state->Vpp < 50));
        gpio_set_value(colibri_pcmcia_gpios[RESET].gpio,
                        state->flags & SS_RESET);

        return 0;
}
static struct pcmcia_low_level colibri_pcmcia_ops = {
.owner = THIS_MODULE,
.first = 0,
.nr = 1,
.hw_init = colibri_pcmcia_hw_init,
.hw_shutdown = colibri_pcmcia_hw_shutdown,
.socket_state = colibri_pcmcia_socket_state,
.configure_socket = colibri_pcmcia_configure_socket,
};
/* The pxa2xx-pcmcia platform device registered below. */
static struct platform_device *colibri_pcmcia_device;

/*
 * Detect the board (Colibri PXA270 or PXA320), fill the shared
 * colibri_pcmcia_gpios[] table with the matching GPIO numbers, and
 * register the pxa2xx-pcmcia platform device carrying our low-level ops.
 */
static int __init colibri_pcmcia_init(void)
{
        int ret;

        if (!machine_is_colibri() && !machine_is_colibri320())
                return -ENODEV;

        colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
        if (!colibri_pcmcia_device)
                return -ENOMEM;

        /* Colibri PXA270 */
        if (machine_is_colibri()) {
                colibri_pcmcia_gpios[RESET].gpio = COLIBRI270_RESET_GPIO;
                colibri_pcmcia_gpios[PPEN].gpio = COLIBRI270_PPEN_GPIO;
                colibri_pcmcia_gpios[BVD1].gpio = COLIBRI270_BVD1_GPIO;
                colibri_pcmcia_gpios[BVD2].gpio = COLIBRI270_BVD2_GPIO;
                colibri_pcmcia_gpios[DETECT].gpio = COLIBRI270_DETECT_GPIO;
                colibri_pcmcia_gpios[READY].gpio = COLIBRI270_READY_GPIO;
        /* Colibri PXA320 */
        } else if (machine_is_colibri320()) {
                colibri_pcmcia_gpios[RESET].gpio = COLIBRI320_RESET_GPIO;
                colibri_pcmcia_gpios[PPEN].gpio = COLIBRI320_PPEN_GPIO;
                colibri_pcmcia_gpios[BVD1].gpio = COLIBRI320_BVD1_GPIO;
                colibri_pcmcia_gpios[BVD2].gpio = COLIBRI320_BVD2_GPIO;
                colibri_pcmcia_gpios[DETECT].gpio = COLIBRI320_DETECT_GPIO;
                colibri_pcmcia_gpios[READY].gpio = COLIBRI320_READY_GPIO;
        }

        ret = platform_device_add_data(colibri_pcmcia_device,
                &colibri_pcmcia_ops, sizeof(colibri_pcmcia_ops));
        if (!ret)
                ret = platform_device_add(colibri_pcmcia_device);

        /* drop the reference on any failure after the alloc */
        if (ret)
                platform_device_put(colibri_pcmcia_device);

        return ret;
}
/* Module unload: tear down the platform device registered at init. */
static void __exit colibri_pcmcia_exit(void)
{
        platform_device_unregister(colibri_pcmcia_device);
}
module_init(colibri_pcmcia_init);
module_exit(colibri_pcmcia_exit);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("PCMCIA support for Toradex Colibri PXA270/PXA320");
MODULE_ALIAS("platform:pxa2xx-pcmcia");
MODULE_LICENSE("GPL");
| gpl-2.0 |
master-j/e210s_cam | arch/ia64/xen/xen_pv_ops.c | 10046 | 29980 | /******************************************************************************
* arch/ia64/xen/xen_pv_ops.c
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/console.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/unistd.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/xencomm.h>
#include <asm/xen/privop.h>
#include "irq_xen.h"
#include "time.h"
/***************************************************************************
* general info
*/
static struct pv_info xen_info __initdata = {
.kernel_rpl = 2, /* or 1: determin at runtime */
.paravirt_enabled = 1,
.name = "Xen/ia64",
};
#define IA64_RSC_PL_SHIFT 2
#define IA64_RSC_PL_BIT_SIZE 2
#define IA64_RSC_PL_MASK \
(((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT)
/*
 * Record the privilege level the Xenified kernel actually runs at
 * (pl 1 or 2) by reading the pl field of ar.rsc.
 */
static void __init
xen_info_init(void)
{
        /* Xenified Linux/ia64 may run on pl = 1 or 2.
         * Determine it at run time. */
        unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC);
        unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT;

        xen_info.kernel_rpl = rpl;
}
/***************************************************************************
* pv_init_ops
* initialization hooks.
*/
/*
 * unw_init_running() callback: save the kernel stack pointer for the
 * crash image and ask the hypervisor to crash the domain. Never returns.
 */
static void
xen_panic_hypercall(struct unw_frame_info *info, void *arg)
{
        current->thread.ksp = (__u64)info->sw - 16;
        HYPERVISOR_shutdown(SHUTDOWN_crash);
        /* we're never actually going to get here... */
}
/* panic_notifier_list callback: hand the panic over to the hypervisor. */
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        unw_init_running(xen_panic_hypercall, NULL);
        /* we're never actually going to get here... */
        return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
        xen_panic_event, NULL, 0 /* try to go last */
};
/* pm_power_off hook: power the domain off via the hypervisor. */
static void xen_pm_power_off(void)
{
        local_irq_disable();
        HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}
/* Log basic Xen domain info: privilege level, start_info pfn, memory. */
static void __init
xen_banner(void)
{
        printk(KERN_INFO
               "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld "
               "flags=0x%x\n",
               xen_info.kernel_rpl,
               HYPERVISOR_shared_info->arch.start_info_pfn,
               xen_start_info->nr_pages, xen_start_info->flags);
}
/*
 * Reserve the page holding the Xen start_info structure so the kernel
 * never hands it to the page allocator. Returns the number of rsvd
 * regions filled in (always 1).
 */
static int __init
xen_reserve_memory(struct rsvd_region *region)
{
        region->start = (unsigned long)__va(
                (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
        region->end = region->start + PAGE_SIZE;
        return 1;
}
/*
 * Early Xen setup: locate start_info via the shared-info page, bring
 * up xencomm and the feature flags (both must precede any hypercall),
 * hook panic and poweroff into hypervisor shutdown, and enable
 * optional paravirt features.
 */
static void __init
xen_arch_setup_early(void)
{
        struct shared_info *s;
        BUG_ON(!xen_pv_domain());

        s = HYPERVISOR_shared_info;
        xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);

        /* Must be done before any hypercall. */
        xencomm_initialize();

        xen_setup_features();
        /* Register a call for panic conditions. */
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &xen_panic_block);
        pm_power_off = xen_pm_power_off;

        xen_ia64_enable_opt_feature();
}
/*
 * Register the preferred consoles for a Xen guest (xenboot, tty, hvc)
 * and disable the VT console switch when no real VT is available.
 */
static void __init
xen_arch_setup_console(char **cmdline_p)
{
        add_preferred_console("xenboot", 0, NULL);
        add_preferred_console("tty", 0, NULL);
        /* use hvc_xen */
        add_preferred_console("hvc", 0, NULL);

#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = NULL;
#endif
}
/*
 * Always returns 1 -- NOTE(review): from the name this tells common
 * setup code to skip native MCA initialization under Xen; confirm at
 * the paravirt call site.
 */
static int __init
xen_arch_setup_nomca(void)
{
        return 1;
}
/* After SMP boot-cpu prep: place the vcpu_info structures. */
static void __init
xen_post_smp_prepare_boot_cpu(void)
{
        xen_setup_vcpu_info_placement();
}
#ifdef ASM_SUPPORTED
static unsigned long __init_or_module
xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
#endif
static void __init
xen_patch_branch(unsigned long tag, unsigned long type);
static const struct pv_init_ops xen_init_ops __initconst = {
.banner = xen_banner,
.reserve_memory = xen_reserve_memory,
.arch_setup_early = xen_arch_setup_early,
.arch_setup_console = xen_arch_setup_console,
.arch_setup_nomca = xen_arch_setup_nomca,
.post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
#ifdef ASM_SUPPORTED
.patch_bundle = xen_patch_bundle,
#endif
.patch_branch = xen_patch_branch,
};
/***************************************************************************
* pv_fsys_data
* addresses for fsys
*/
extern unsigned long xen_fsyscall_table[NR_syscalls];
extern char xen_fsys_bubble_down[];
struct pv_fsys_data xen_fsys_data __initdata = {
.fsyscall_table = (unsigned long *)xen_fsyscall_table,
.fsys_bubble_down = (void *)xen_fsys_bubble_down,
};
/***************************************************************************
* pv_patchdata
* patchdata addresses
*/
#define DECLARE(name) \
extern unsigned long __xen_start_gate_##name##_patchlist[]; \
extern unsigned long __xen_end_gate_##name##_patchlist[]
DECLARE(fsyscall);
DECLARE(brl_fsys_bubble_down);
DECLARE(vtop);
DECLARE(mckinley_e9);
extern unsigned long __xen_start_gate_section[];
#define ASSIGN(name) \
.start_##name##_patchlist = \
(unsigned long)__xen_start_gate_##name##_patchlist, \
.end_##name##_patchlist = \
(unsigned long)__xen_end_gate_##name##_patchlist
static struct pv_patchdata xen_patchdata __initdata = {
ASSIGN(fsyscall),
ASSIGN(brl_fsys_bubble_down),
ASSIGN(vtop),
ASSIGN(mckinley_e9),
.gate_section = (void*)__xen_start_gate_section,
};
/***************************************************************************
* pv_cpu_ops
* intrinsics hooks.
*/
#ifndef ASM_SUPPORTED
/*
 * Program the interval timer match register: translate the virtualized
 * ITC value back to the native timebase by subtracting the per-domain
 * itc_offset before handing it to Xen.
 */
static void
xen_set_itm_with_offset(unsigned long val)
{
        /* ia64_cpu_local_tick() calls this with interrupt enabled. */
        /* WARN_ON(!irqs_disabled()); */
        xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
}
/* Read cr.itm and translate it into the virtualized (offset) timebase. */
static unsigned long
xen_get_itm_with_offset(void)
{
        /* unused at this moment */
        printk(KERN_DEBUG "%s is called.\n", __func__);
        WARN_ON(!irqs_disabled());
        return ia64_native_getreg(_IA64_REG_CR_ITM) +
                XEN_MAPPEDREGS->itc_offset;
}
/* ia64_set_itc() is only called by
 * cpu_init() with ia64_set_itc(0) and ia64_sync_itc().
 * So XEN_MAPPEDREGS->itc_offset can be considered as almost constant.
 */
static void
xen_set_itc(unsigned long val)
{
        unsigned long mitc;

        WARN_ON(!irqs_disabled());
        /* record the delta between the requested and the native ITC */
        mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
        XEN_MAPPEDREGS->itc_offset = val - mitc;
        XEN_MAPPEDREGS->itc_last = val;
}
/*
 * Virtualized ITC read: native ITC plus the per-domain offset, made
 * monotonic across concurrent readers by cmpxchg-updating the shared
 * itc_last high-water mark.
 */
static unsigned long
xen_get_itc(void)
{
        unsigned long res;
        unsigned long itc_offset;
        unsigned long itc_last;
        unsigned long ret_itc_last;

        itc_offset = XEN_MAPPEDREGS->itc_offset;
        do {
                itc_last = XEN_MAPPEDREGS->itc_last;
                res = ia64_native_getreg(_IA64_REG_AR_ITC);
                res += itc_offset;
                /* never return a value behind what was already handed out */
                if (itc_last >= res)
                        res = itc_last + 1;
                ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
                                       itc_last, res);
        } while (unlikely(ret_itc_last != itc_last));

        return res;

#if 0
        /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled.
           Should it be paravirtualized instead? */
        WARN_ON(!irqs_disabled());
        itc_offset = XEN_MAPPEDREGS->itc_offset;
        itc_last = XEN_MAPPEDREGS->itc_last;
        res = ia64_native_getreg(_IA64_REG_AR_ITC);
        res += itc_offset;
        if (itc_last >= res)
                res = itc_last + 1;
        XEN_MAPPEDREGS->itc_last = res;
        return res;
#endif
}
/*
 * Paravirtualized register write: route the ar/cr registers Xen
 * virtualizes through hypervisor calls, fall back to the native
 * setreg for everything else.
 */
static void xen_setreg(int regnum, unsigned long val)
{
        switch (regnum) {
        case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
                xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
                break;
        case _IA64_REG_AR_ITC:
                xen_set_itc(val);
                break;
        case _IA64_REG_CR_TPR:
                xen_set_tpr(val);
                break;
        case _IA64_REG_CR_ITM:
                xen_set_itm_with_offset(val);
                break;
        case _IA64_REG_CR_EOI:
                xen_eoi(val);
                break;
        default:
                ia64_native_setreg_func(regnum, val);
                break;
        }
}
/*
 * Paravirtualized register read: serve the registers Xen virtualizes
 * from hypervisor state, fall back to the native getreg otherwise.
 */
static unsigned long xen_getreg(int regnum)
{
        unsigned long res;

        switch (regnum) {
        case _IA64_REG_PSR:
                res = xen_get_psr();
                break;
        case _IA64_REG_AR_ITC:
                res = xen_get_itc();
                break;
        case _IA64_REG_CR_ITM:
                res = xen_get_itm_with_offset();
                break;
        case _IA64_REG_CR_IVR:
                res = xen_get_ivr();
                break;
        case _IA64_REG_CR_TPR:
                res = xen_get_tpr();
                break;
        default:
                res = ia64_native_getreg_func(regnum);
                break;
        }
        return res;
}
/* turning on interrupts is a bit more complicated.. write to the
 * memory-mapped virtual psr.i bit first (to avoid race condition),
 * then if any interrupts were pending, we have to execute a hyperprivop
 * to ensure the pending interrupt gets delivered; else we're done! */
static void
xen_ssm_i(void)
{
        int old = xen_get_virtual_psr_i();

        xen_set_virtual_psr_i(1);
        barrier();
        /* only hypercall on the 0 -> 1 transition with work pending */
        if (!old && xen_get_virtual_pend())
                xen_hyper_ssm_i();
}
/* turning off interrupts can be paravirtualized simply by writing
 * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
static void
xen_rsm_i(void)
{
        xen_set_virtual_psr_i(0);
        barrier();
}
/* Report the virtual psr.i state as the architectural IA64_PSR_I bit. */
static unsigned long
xen_get_psr_i(void)
{
	if (xen_get_virtual_psr_i())
		return IA64_PSR_I;
	return 0;
}
/* Restore local interrupt state from a saved psr mask: unmask when the
 * saved mask had psr.i set, otherwise mask. */
static void
xen_intrin_local_irq_restore(unsigned long mask)
{
	int enable = (mask & IA64_PSR_I) != 0;

	if (enable)
		xen_ssm_i();
	else
		xen_rsm_i();
}
#else
/*
 * ASM_SUPPORTED: each paravirt hook is a hand-written asm stub.  The
 * region between xen_<name>_direct_start/_direct_end is the candidate
 * code for inline binary patching at call sites; when called as a stub
 * it returns through b6 (the paravirt stub convention used throughout
 * this file).
 */
#define __DEFINE_FUNC(name, code) \
	extern const char xen_ ## name ## _direct_start[]; \
	extern const char xen_ ## name ## _direct_end[]; \
	asm (".align 32\n" \
	     ".proc xen_" #name "\n" \
	     "xen_" #name ":\n" \
	     "xen_" #name "_direct_start:\n" \
	     code \
	     "xen_" #name "_direct_end:\n" \
	     "br.cond.sptk.many b6\n" \
	     ".endp xen_" #name "\n")
/* The DEFINE_* variants below differ only in the C prototype declared
 * for the generated stub. */
#define DEFINE_VOID_FUNC0(name, code) \
	extern void \
	xen_ ## name (void); \
	__DEFINE_FUNC(name, code)
#define DEFINE_VOID_FUNC1(name, code) \
	extern void \
	xen_ ## name (unsigned long arg); \
	__DEFINE_FUNC(name, code)
#define DEFINE_VOID_FUNC1_VOID(name, code) \
	extern void \
	xen_ ## name (void *arg); \
	__DEFINE_FUNC(name, code)
#define DEFINE_VOID_FUNC2(name, code) \
	extern void \
	xen_ ## name (unsigned long arg0, \
		      unsigned long arg1); \
	__DEFINE_FUNC(name, code)
#define DEFINE_FUNC0(name, code) \
	extern unsigned long \
	xen_ ## name (void); \
	__DEFINE_FUNC(name, code)
#define DEFINE_FUNC1(name, type, code) \
	extern unsigned long \
	xen_ ## name (type arg); \
	__DEFINE_FUNC(name, code)
/* XSI slot holding the address of the virtual psr.i mask byte */
#define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
/*
 * static void xen_set_itm_with_offset(unsigned long val)
 * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
 */
/* 2 bundles */
DEFINE_VOID_FUNC1(set_itm_with_offset,
		  /* r8 = val; bias by -itc_offset before the hyperprivop */
		  "mov r2 = " __stringify(XSI_BASE) " + "
		  __stringify(XSI_ITC_OFFSET_OFS) "\n"
		  ";;\n"
		  "ld8 r3 = [r2]\n"
		  ";;\n"
		  "sub r8 = r8, r3\n"
		  "break " __stringify(HYPERPRIVOP_SET_ITM) "\n");
/*
 * static unsigned long xen_get_itm_with_offset(void)
 * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset;
 */
/* 2 bundles */
DEFINE_FUNC0(get_itm_with_offset,
	     "mov r2 = " __stringify(XSI_BASE) " + "
	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
	     ";;\n"
	     "ld8 r3 = [r2]\n"	/* r3 = itc_offset */
	     "mov r8 = cr.itm\n"
	     ";;\n"
	     /* BUG FIX: add the loaded itc_offset (r3), not the XSI
	      * address still sitting in r2.  The original "add r8 = r8, r2"
	      * left r3 unused and returned cr.itm biased by the address
	      * of the itc_offset slot instead of by its value; the C
	      * reference above clearly adds itc_offset. */
	     "add r8 = r8, r3\n");
/*
 * static void xen_set_itc(unsigned long val)
 * unsigned long mitc;
 *
 * WARN_ON(!irqs_disabled());
 * mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
 * XEN_MAPPEDREGS->itc_offset = val - mitc;
 * XEN_MAPPEDREGS->itc_last = val;
 */
/* 2 bundles */
DEFINE_VOID_FUNC1(set_itc,
		  "mov r2 = " __stringify(XSI_BASE) " + "
		  __stringify(XSI_ITC_LAST_OFS) "\n"
		  "mov r3 = ar.itc\n"
		  ";;\n"
		  "sub r3 = r8, r3\n"	/* r3 = val - ar.itc = itc_offset */
		  "st8 [r2] = r8, "	/* itc_last = val, then post-inc r2 */
		  __stringify(XSI_ITC_LAST_OFS) " - "
		  __stringify(XSI_ITC_OFFSET_OFS) "\n"
		  /* NOTE(review): st8 post-increment *adds* the immediate;
		   * starting at XSI_ITC_LAST and adding (LAST - OFFSET)
		   * moves r2 away from XSI_ITC_OFFSET (getting there would
		   * need OFFSET - LAST).  Verify the second store really
		   * lands on itc_offset -- compare with get_itc below,
		   * which starts at OFFSET and adds the same delta. */
		  ";;\n"
		  "st8 [r2] = r3\n");	/* intended: itc_offset = r3 */
/*
 * static unsigned long xen_get_itc(void)
 * unsigned long res;
 * unsigned long itc_offset;
 * unsigned long itc_last;
 * unsigned long ret_itc_last;
 *
 * itc_offset = XEN_MAPPEDREGS->itc_offset;
 * do {
 * itc_last = XEN_MAPPEDREGS->itc_last;
 * res = ia64_native_getreg(_IA64_REG_AR_ITC);
 * res += itc_offset;
 * if (itc_last >= res)
 * res = itc_last + 1;
 * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
 * itc_last, res);
 * } while (unlikely(ret_itc_last != itc_last));
 * return res;
 */
/* 5 bundles */
DEFINE_FUNC0(get_itc,
	     "mov r2 = " __stringify(XSI_BASE) " + "
	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
	     ";;\n"
	     "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - "
	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
	     /* r9 = itc_offset */
	     /* r2 = XSI_ITC_LAST (post-incremented past the offset slot) */
	     "888:\n"
	     "mov r8 = ar.itc\n"	/* res = ar.itc */
	     ";;\n"
	     "ld8 r3 = [r2]\n"		/* r3 = itc_last */
	     "add r8 = r8, r9\n"	/* res = ar.itc + itc_offset */
	     ";;\n"
	     /* NOTE(review): cmp.gtu is a strict '>' while the C reference
	      * above uses '>='; the cmpxchg retry loop still keeps
	      * itc_last monotonic, but the asm may return res == itc_last. */
	     "cmp.gtu p6, p0 = r3, r8\n"
	     ";;\n"
	     "(p6) add r8 = 1, r3\n"	/* if (itc_last > res) itc_last + 1 */
	     ";;\n"
	     "mov ar.ccv = r8\n"
	     ";;\n"
	     /* atomically publish the new itc_last; retry on race */
	     "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n"
	     ";;\n"
	     "cmp.ne p6, p0 = r10, r3\n"
	     "(p6) hint @pause\n"
	     "(p6) br.cond.spnt 888b\n");
/* flush-cache: one FC hyperprivop, address argument in r8 */
DEFINE_VOID_FUNC1_VOID(fc,
		       "break " __stringify(HYPERPRIVOP_FC) "\n");
/*
 * asm version of xen_intrin_local_irq_restore(); pseudo code:
 *
 * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR
 * masked_addr = *psr_i_addr_addr
 * pending_intr_addr = masked_addr - 1
 * if (val & IA64_PSR_I) {
 * masked = *masked_addr
 * *masked_addr = 0:xen_set_virtual_psr_i(1)
 * compiler barrier
 * if (masked) {
 * uint8_t pending = *pending_intr_addr;
 * if (pending)
 * XEN_HYPER_SSM_I
 * }
 * } else {
 * *masked_addr = 1:xen_set_virtual_psr_i(0)
 * }
 */
/* 6 bundles */
DEFINE_VOID_FUNC1(intrin_local_irq_restore,
		  /* r8 = input value: 0 or IA64_PSR_I
		   * p6 = (flags & IA64_PSR_I)
		   * = if clause
		   * p7 = !(flags & IA64_PSR_I)
		   * = else clause
		   */
		  "cmp.ne p6, p7 = r8, r0\n"
		  "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
		  ";;\n"
		  /* r9 = XEN_PSR_I_ADDR */
		  "ld8 r9 = [r9]\n"
		  ";;\n"
		  /* r10 = masked previous value */
		  "(p6) ld1.acq r10 = [r9]\n"
		  ";;\n"
		  /* p8 = !masked interrupt masked previously? */
		  "(p6) cmp.ne.unc p8, p0 = r10, r0\n"
		  /* p7 = else clause */
		  "(p7) mov r11 = 1\n"
		  ";;\n"
		  /* masked = 1 */
		  "(p7) st1.rel [r9] = r11\n"
		  /* p6 = if clause */
		  /* masked = 0
		   * r9 = masked_addr - 1
		   * = pending_intr_addr
		   */
		  "(p8) st1.rel [r9] = r0, -1\n"
		  ";;\n"
		  /* r8 = pending_intr */
		  "(p8) ld1.acq r11 = [r9]\n"
		  ";;\n"
		  /* p9 = interrupt pending? */
		  "(p8) cmp.ne.unc p9, p10 = r11, r0\n"
		  ";;\n"
		  "(p10) mf\n"
		  /* issue hypercall to trigger interrupt */
		  "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n");
/* single-hyperprivop wrappers: arguments arrive in r8/r9 per the stub
 * calling convention, results (if any) are returned in r8 */
DEFINE_VOID_FUNC2(ptcga,
		  "break " __stringify(HYPERPRIVOP_PTC_GA) "\n");
DEFINE_VOID_FUNC2(set_rr,
		  "break " __stringify(HYPERPRIVOP_SET_RR) "\n");
/*
 * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR;
 * tmp = *tmp
 * tmp = *tmp;
 * psr_i = tmp? 0: IA64_PSR_I;
 */
/* 4 bundles */
DEFINE_FUNC0(get_psr_i,
	     "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
	     ";;\n"
	     "ld8 r9 = [r9]\n"		/* r9 = XEN_PSR_I_ADDR */
	     "mov r8 = 0\n"		/* psr_i = 0 */
	     ";;\n"
	     "ld1.acq r9 = [r9]\n"	/* r9 = XEN_PSR_I */
	     ";;\n"
	     "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I == 0),
					     * i.e. interrupts unmasked */
	     ";;\n"
	     "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n");
DEFINE_FUNC1(thash, unsigned long,
	     "break " __stringify(HYPERPRIVOP_THASH) "\n");
DEFINE_FUNC1(get_cpuid, int,
	     "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n");
DEFINE_FUNC1(get_pmd, int,
	     "break " __stringify(HYPERPRIVOP_GET_PMD) "\n");
DEFINE_FUNC1(get_rr, unsigned long,
	     "break " __stringify(HYPERPRIVOP_GET_RR) "\n");
/*
 * void xen_privop_ssm_i(void)
 *
 * int masked = !xen_get_virtual_psr_i();
 * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr)
 * xen_set_virtual_psr_i(1)
 * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0
 * // compiler barrier
 * if (masked) {
 * uint8_t* pend_int_addr =
 * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1;
 * uint8_t pending = *pend_int_addr;
 * if (pending)
 * XEN_HYPER_SSM_I
 * }
 */
/* 4 bundles */
DEFINE_VOID_FUNC0(ssm_i,
		  "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
		  ";;\n"
		  "ld8 r8 = [r8]\n"	/* r8 = XEN_PSR_I_ADDR */
		  ";;\n"
		  "ld1.acq r9 = [r8]\n"	/* r9 = XEN_PSR_I */
		  ";;\n"
		  "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt
					     * r8 = XEN_PSR_I_ADDR - 1
					     * = pend_int_addr
					     */
		  "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I
						 * previously interrupt
						 * masked?
						 */
		  ";;\n"
		  "(p6) ld1.acq r8 = [r8]\n"	/* r8 = xen_pend_int */
		  ";;\n"
		  "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/
		  ";;\n"
		  /* issue hypercall to get interrupt */
		  "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
		  ";;\n");
/*
 * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr
 * = XEN_PSR_I_ADDR_ADDR;
 * psr_i_addr = *psr_i_addr_addr;
 * *psr_i_addr = 1;
 */
/* 2 bundles */
DEFINE_VOID_FUNC0(rsm_i,
		  "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
		  /* r8 = &XEN_PSR_I_ADDR (the XSI slot, not yet the
		   * mask byte address) */
		  "mov r9 = 1\n"
		  ";;\n"
		  "ld8 r8 = [r8]\n"	/* r8 = XEN_PSR_I_ADDR */
		  ";;\n"
		  "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 (masked) */
extern void
xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
		   unsigned long val2, unsigned long val3,
		   unsigned long val4);
__DEFINE_FUNC(set_rr0_to_rr4,
	      "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n");
extern unsigned long xen_getreg(int regnum);
/* compare the requested register number (in r8) against _IA64_REG_<id>;
 * on a match, issue the GET hyperprivop and return through b6 */
#define __DEFINE_GET_REG(id, privop) \
	"mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
	";;\n" \
	"cmp.eq p6, p0 = r2, r8\n" \
	";;\n" \
	"(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \
	"(p6) br.cond.sptk.many b6\n" \
	";;\n"
/* asm dispatcher behind the C-level xen_getreg() slot: handle PSR, ITC,
 * ITM, IVR and TPR here, tail-branch everything else to the native
 * getreg function. */
__DEFINE_FUNC(getreg,
	      __DEFINE_GET_REG(PSR, PSR)
	      /* get_itc */
	      "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_get_itc\n"
	      ";;\n"
	      /* get itm */
	      "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_get_itm_with_offset\n"
	      ";;\n"
	      __DEFINE_GET_REG(CR_IVR, IVR)
	      __DEFINE_GET_REG(CR_TPR, TPR)
	      /* fall back */
	      "movl r2 = ia64_native_getreg_func\n"
	      ";;\n"
	      "mov b7 = r2\n"
	      ";;\n"
	      "br.cond.sptk.many b7\n");
extern void xen_setreg(int regnum, unsigned long val);
/* compare the register number (in r9 here) against _IA64_REG_<id>; on a
 * match, issue the SET hyperprivop and return through b6 */
#define __DEFINE_SET_REG(id, privop) \
	"mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
	";;\n" \
	"cmp.eq p6, p0 = r2, r9\n" \
	";;\n" \
	"(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \
	"(p6) br.cond.sptk.many b6\n" \
	";;\n"
/* asm dispatcher behind the C-level xen_setreg() slot.
 *
 * NOTE(review): the AR_KR* range test below and __DEFINE_SET_REG both
 * treat r9 as the register number, but the inline ITM/ITC tests compare
 * against r8.  Those two conventions are mutually inconsistent -- one
 * of them must be using the wrong register for this stub's
 * (regnum, val) assignment.  Verify against the paravirt stub calling
 * convention before relying on the ITM/ITC fast paths. */
__DEFINE_FUNC(setreg,
	      /* kr0 .. kr 7*/
	      /*
	       * if (_IA64_REG_AR_KR0 <= regnum &&
	       * regnum <= _IA64_REG_AR_KR7) {
	       * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0
	       * register __val asm ("r9") = val
	       * "break HYPERPRIVOP_SET_KR"
	       * }
	       */
	      "mov r17 = r9\n"
	      "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n"
	      ";;\n"
	      "cmp.ge p6, p0 = r9, r2\n"
	      "sub r17 = r17, r2\n"	/* r17 = regnum - KR0 */
	      ";;\n"
	      "(p6) cmp.ge.unc p7, p0 = "
	      __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0)
	      ", r17\n"
	      ";;\n"
	      "(p7) mov r9 = r8\n"	/* swap into the SET_KR convention */
	      ";;\n"
	      "(p7) mov r8 = r17\n"
	      "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n"
	      /* set itm */
	      "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_set_itm_with_offset\n"
	      /* set itc */
	      "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_set_itc\n"
	      __DEFINE_SET_REG(CR_TPR, SET_TPR)
	      __DEFINE_SET_REG(CR_EOI, EOI)
	      /* fall back */
	      "movl r2 = ia64_native_setreg_func\n"
	      ";;\n"
	      "mov b7 = r2\n"
	      ";;\n"
	      "br.cond.sptk.many b7\n");
#endif
/* pv_cpu_ops table: privileged-instruction replacements, pointing at
 * either the C or the asm implementations above depending on
 * ASM_SUPPORTED.  Installed by xen_setup_pv_ops(). */
static const struct pv_cpu_ops xen_cpu_ops __initconst = {
	.fc = xen_fc,
	.thash = xen_thash,
	.get_cpuid = xen_get_cpuid,
	.get_pmd = xen_get_pmd,
	.getreg = xen_getreg,
	.setreg = xen_setreg,
	.ptcga = xen_ptcga,
	.get_rr = xen_get_rr,
	.set_rr = xen_set_rr,
	.set_rr0_to_rr4 = xen_set_rr0_to_rr4,
	.ssm_i = xen_ssm_i,
	.rsm_i = xen_rsm_i,
	.get_psr_i = xen_get_psr_i,
	.intrin_local_irq_restore
	= xen_intrin_local_irq_restore,
};
/******************************************************************************
 * replacement of hand written assembly codes.
 */
/* entry labels -- presumably defined in the Xen assembly files of this
 * arch (not visible here); only their addresses are used. */
extern char xen_switch_to;
extern char xen_leave_syscall;
extern char xen_work_processed_syscall;
extern char xen_leave_kernel;
/* branch targets handed to the boot-time patcher so the generic kernel
 * entry paths can be redirected to the Xen versions */
const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
	.switch_to = (unsigned long)&xen_switch_to,
	.leave_syscall = (unsigned long)&xen_leave_syscall,
	.work_processed_syscall = (unsigned long)&xen_work_processed_syscall,
	.leave_kernel = (unsigned long)&xen_leave_kernel,
};
/***************************************************************************
 * pv_iosapic_ops
 * iosapic read/write hooks.
 */
/* no PC-AT compatibility setup is performed under Xen */
static void
xen_pcat_compat_init(void)
{
	/* nothing */
}

/* never supplies a trigger-specific irq_chip override */
static struct irq_chip*
xen_iosapic_get_irq_chip(unsigned long trigger)
{
	return NULL;
}
/*
 * Read an IOSAPIC register via the hypervisor: the physical base is
 * recovered from the uncached-window virtual address and the access is
 * proxied through a PHYSDEVOP_apic_read hypercall.  On hypercall
 * failure the error code is returned in place of the register value,
 * as in the original implementation.
 */
static unsigned int
xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
{
	int err;
	struct physdev_apic apic_op = {
		.apic_physbase = (unsigned long)iosapic -
				 __IA64_UNCACHED_OFFSET,
		.reg = reg,
	};

	err = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
	if (err)
		return err;
	return apic_op.value;
}
/*
 * Write an IOSAPIC register via a PHYSDEVOP_apic_write hypercall.
 * Best-effort: the hypercall result is intentionally ignored, matching
 * the original behavior.
 */
static void
xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
	struct physdev_apic apic_op = {
		.apic_physbase = (unsigned long)iosapic -
				 __IA64_UNCACHED_OFFSET,
		.reg = reg,
		.value = val,
	};

	HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
}
/* iosapic accesses proxied through hypervisor physdev hypercalls */
static struct pv_iosapic_ops xen_iosapic_ops __initdata = {
	.pcat_compat_init = xen_pcat_compat_init,
	.__get_irq_chip = xen_iosapic_get_irq_chip,
	.__read = xen_iosapic_read,
	.__write = xen_iosapic_write,
};
/***************************************************************************
 * pv_ops initialization
 */
/* Populate the global pv_* operation tables with the Xen
 * implementations.  xen_info_init() must run before pv_info is copied;
 * the whole function must run before any paravirtualized path is
 * exercised. */
void __init
xen_setup_pv_ops(void)
{
	xen_info_init();
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_fsys_data = xen_fsys_data;
	pv_patchdata = xen_patchdata;
	pv_cpu_ops = xen_cpu_ops;
	pv_iosapic_ops = xen_iosapic_ops;
	pv_irq_ops = xen_irq_ops;
	pv_time_ops = xen_time_ops;
	/* register the asm entry-point replacements declared above */
	paravirt_cpu_asm_init(&xen_cpu_asm_switch);
}
#ifdef ASM_SUPPORTED
/***************************************************************************
 * binary patching
 * pv_init_ops.patch_bundle
 */
/* getter stubs: a single GET_<privop> hyperprivop, result in r8 */
#define DEFINE_FUNC_GETREG(name, privop) \
	DEFINE_FUNC0(get_ ## name, \
		     "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n")
DEFINE_FUNC_GETREG(psr, PSR);
DEFINE_FUNC_GETREG(eflag, EFLAG);
DEFINE_FUNC_GETREG(ivr, IVR);
DEFINE_FUNC_GETREG(tpr, TPR);
/* kernel-register setters: move the value to r9 and the kr index n to
 * r8 (the SET_KR hyperprivop convention), then trap */
#define DEFINE_FUNC_SET_KR(n) \
	DEFINE_VOID_FUNC0(set_kr ## n, \
			  ";;\n" \
			  "mov r9 = r8\n" \
			  "mov r8 = " #n "\n" \
			  "break " __stringify(HYPERPRIVOP_SET_KR) "\n")
DEFINE_FUNC_SET_KR(0);
DEFINE_FUNC_SET_KR(1);
DEFINE_FUNC_SET_KR(2);
DEFINE_FUNC_SET_KR(3);
DEFINE_FUNC_SET_KR(4);
DEFINE_FUNC_SET_KR(5);
DEFINE_FUNC_SET_KR(6);
DEFINE_FUNC_SET_KR(7);
/* setter stubs: a single SET_<privop> (or EOI) hyperprivop */
#define __DEFINE_FUNC_SETREG(name, privop) \
	DEFINE_VOID_FUNC0(name, \
			  "break "__stringify(HYPERPRIVOP_ ## privop) "\n")
#define DEFINE_FUNC_SETREG(name, privop) \
	__DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop)
DEFINE_FUNC_SETREG(eflag, EFLAG);
DEFINE_FUNC_SETREG(tpr, TPR);
__DEFINE_FUNC_SETREG(eoi, EOI);
/* out-of-line tail of the patched irq-restore sequence (the pending
 * interrupt check and SSM_I hypercall), plus the patchable direct
 * version of intrin_local_irq_restore and its brl relocation record */
extern const char xen_check_events[];
extern const char __xen_intrin_local_irq_restore_direct_start[];
extern const char __xen_intrin_local_irq_restore_direct_end[];
extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc;
asm (
	".align 32\n"
	".proc xen_check_events\n"
	"xen_check_events:\n"
	/* masked = 0
	 * r9 = masked_addr - 1
	 * = pending_intr_addr
	 */
	"st1.rel [r9] = r0, -1\n"
	";;\n"
	/* r8 = pending_intr */
	"ld1.acq r11 = [r9]\n"
	";;\n"
	/* p9 = interrupt pending? */
	"cmp.ne p9, p10 = r11, r0\n"
	";;\n"
	"(p10) mf\n"
	/* issue hypercall to trigger interrupt */
	"(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
	"br.cond.sptk.many b6\n"
	".endp xen_check_events\n"
	"\n"
	".align 32\n"
	".proc __xen_intrin_local_irq_restore_direct\n"
	"__xen_intrin_local_irq_restore_direct:\n"
	"__xen_intrin_local_irq_restore_direct_start:\n"
	"1:\n"
	"{\n"
	"cmp.ne p6, p7 = r8, r0\n"
	"mov r17 = ip\n" /* get ip to calc return address */
	"mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n"
	";;\n"
	"}\n"
	"{\n"
	/* r9 = XEN_PSR_I_ADDR */
	"ld8 r9 = [r9]\n"
	";;\n"
	/* r10 = masked previous value */
	"(p6) ld1.acq r10 = [r9]\n"
	"adds r17 = 1f - 1b, r17\n" /* calculate return address */
	";;\n"
	"}\n"
	"{\n"
	/* p8 = !masked interrupt masked previously? */
	"(p6) cmp.ne.unc p8, p0 = r10, r0\n"
	"\n"
	/* p7 = else clause */
	"(p7) mov r11 = 1\n"
	";;\n"
	"(p8) mov b6 = r17\n" /* set return address */
	"}\n"
	"{\n"
	/* masked = 1 */
	"(p7) st1.rel [r9] = r11\n"
	"\n"
	/* [99:] tags the brl below so its target can be relocated after
	 * the bundle is copied into a patch site (see xen_patch_bundle) */
	"[99:]\n"
	"(p8) brl.cond.dptk.few xen_check_events\n"
	"}\n"
	/* pv calling stub is 5 bundles. fill nop to adjust return address */
	"{\n"
	"nop 0\n"
	"nop 0\n"
	"nop 0\n"
	"}\n"
	"1:\n"
	"__xen_intrin_local_irq_restore_direct_end:\n"
	".endp __xen_intrin_local_irq_restore_direct\n"
	"\n"
	".align 8\n"
	"__xen_intrin_local_irq_restore_direct_reloc:\n"
	"data8 99b\n"
);
/* table mapping each patch type to the xen_*_direct_start/_end code
 * region that may be inlined at the call site by the bundle patcher */
static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[]
__initdata_or_module =
{
#define XEN_PATCH_BUNDLE_ELEM(name, type) \
	{ \
		(void*)xen_ ## name ## _direct_start, \
		(void*)xen_ ## name ## _direct_end, \
		PARAVIRT_PATCH_TYPE_ ## type, \
	}
	XEN_PATCH_BUNDLE_ELEM(fc, FC),
	XEN_PATCH_BUNDLE_ELEM(thash, THASH),
	XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
	XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
	XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
	XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
	XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
	XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
	XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
	XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
	XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
	/* the irq_restore entry needs a post-copy brl relocation; see
	 * xen_patch_bundle() */
	{
		(void*)__xen_intrin_local_irq_restore_direct_start,
		(void*)__xen_intrin_local_irq_restore_direct_end,
		PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE,
	},
#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \
	{ \
		xen_get_ ## name ## _direct_start, \
		xen_get_ ## name ## _direct_end, \
		PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
	}
	XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
	XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG),
	XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR),
	XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR),
	XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC),
	XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM),
#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
	{ \
		xen_ ## name ## _direct_start, \
		xen_ ## name ## _direct_end, \
		PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
	}
#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
	__XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg)
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7),
	XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG),
	XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR),
	__XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI),
	XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC),
	XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM),
};
/*
 * Patch the bundle(s) between @sbundle and @ebundle with the Xen stub
 * registered for @type, falling back to the native patcher when Xen has
 * no replacement.  Returns the number of bytes patched in; 0 means the
 * indirect call is left in place.
 */
static unsigned long __init_or_module
xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
{
	/* use ARRAY_SIZE for consistency with xen_patch_branch() below
	 * instead of the open-coded sizeof/sizeof division */
	const unsigned long nelems = ARRAY_SIZE(xen_patch_bundle_elems);
	unsigned long used;
	const struct paravirt_patch_bundle_elem *found;

	used = __paravirt_patch_apply_bundle(sbundle, ebundle, type,
					     xen_patch_bundle_elems, nelems,
					     &found);

	if (found == NULL)
		/* fallback */
		return ia64_native_patch_bundle(sbundle, ebundle, type);
	if (used == 0)
		return used;

	/* relocation: the inlined irq_restore code contains a brl to
	 * xen_check_events whose target must be re-resolved for the
	 * copy's new address */
	switch (type) {
	case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: {
		unsigned long reloc =
			__xen_intrin_local_irq_restore_direct_reloc;
		unsigned long reloc_offset = reloc - (unsigned long)
			__xen_intrin_local_irq_restore_direct_start;
		unsigned long tag = (unsigned long)sbundle + reloc_offset;
		paravirt_patch_reloc_brl(tag, xen_check_events);
		break;
	}
	default:
		/* nothing */
		break;
	}
	return used;
}
#endif /* ASM_SUPPORTED */
/* branch-patch targets: entry points the boot-time patcher substitutes
 * for the generic kernel entry/exit paths */
const struct paravirt_patch_branch_target xen_branch_target[]
__initconst = {
#define PARAVIRT_BR_TARGET(name, type) \
	{ \
		&xen_ ## name, \
		PARAVIRT_PATCH_TYPE_BR_ ## type, \
	}
	PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
	PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
	PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
	PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
};
/* pv_init_ops.patch_branch hook: delegate to the generic branch patcher
 * with the Xen target table above */
static void __init
xen_patch_branch(unsigned long tag, unsigned long type)
{
	__paravirt_patch_apply_branch(tag, type, xen_branch_target,
				      ARRAY_SIZE(xen_branch_target));
}
/*
* SCSI Enclosure Services
*
* Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
*
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or
** modify it under the terms of the GNU General Public License
** version 2 as published by the Free Software Foundation.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/enclosure.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_host.h>
/* Cached SES diagnostic pages for one enclosure device.  page1_types
 * points into the page1 buffer at the type-descriptor headers, so it
 * must not outlive page1. */
struct ses_device {
	unsigned char *page1;		/* diagnostic page 1 buffer */
	unsigned char *page1_types;	/* type descriptor headers within page1 */
	unsigned char *page2;		/* diagnostic page 2 buffer (control/status) */
	unsigned char *page10;		/* diagnostic page 10 buffer; NULL if absent */
	short page1_len;
	short page1_num_types;
	short page2_len;
	short page10_len;
};

/* per-component scratch data hung off struct enclosure_component */
struct ses_component {
	u64 addr;		/* SAS address parsed from page 10; 0 if unknown */
	unsigned char *desc;	/* raw additional-status descriptor in page10 */
};
/*
 * Driver probe: bind only to SCSI devices reporting TYPE_ENCLOSURE;
 * everything else is rejected with -ENODEV.
 */
static int ses_probe(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (sdev->type != TYPE_ENCLOSURE)
		return -ENODEV;

	sdev_printk(KERN_NOTICE, sdev, "Attached Enclosure device\n");
	return 0;
}
#define SES_TIMEOUT (30 * HZ)
#define SES_RETRIES 3

/* Issue RECEIVE DIAGNOSTIC RESULTS for @page_code into @buf.
 * Returns 0 on success, otherwise the scsi_execute_req() result. */
static int ses_recv_diag(struct scsi_device *sdev, int page_code,
			 void *buf, int bufflen)
{
	unsigned char cmd[] = {
		RECEIVE_DIAGNOSTIC,
		1,		/* Set PCV bit */
		page_code,
		bufflen >> 8,
		bufflen & 0xff,
		0
	};

	return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
				NULL, SES_TIMEOUT, SES_RETRIES, NULL);
}
/* Issue SEND DIAGNOSTIC with the page image in @buf (the page code is
 * carried inside the page data, hence the unused @page_code argument).
 * Logs and returns the scsi_execute_req() result on failure. */
static int ses_send_diag(struct scsi_device *sdev, int page_code,
			 void *buf, int bufflen)
{
	u32 result;

	unsigned char cmd[] = {
		SEND_DIAGNOSTIC,
		0x10,		/* Set PF bit */
		0,
		bufflen >> 8,
		bufflen & 0xff,
		0
	};

	result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
				  NULL, SES_TIMEOUT, SES_RETRIES, NULL);
	if (result)
		sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
			    result);
	return result;
}
/* Write the 4-byte control descriptor @desc for component @ecomp into
 * the cached page 2 image and send the whole page to the device.  Only
 * DEVICE and ARRAY_DEVICE elements are counted when locating the
 * target slot, mirroring the walk in ses_get_page2_descriptor(). */
static int ses_set_page2_descriptor(struct enclosure_device *edev,
				    struct enclosure_component *ecomp,
				    unsigned char *desc)
{
	int i, j, count = 0, descriptor = ecomp->number;
	struct scsi_device *sdev = to_scsi_device(edev->edev.parent);
	struct ses_device *ses_dev = edev->scratch;
	unsigned char *type_ptr = ses_dev->page1_types;
	unsigned char *desc_ptr = ses_dev->page2 + 8;

	/* Clear everything */
	memset(desc_ptr, 0, ses_dev->page2_len - 8);
	for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) {
		for (j = 0; j < type_ptr[1]; j++) {
			/* advance past the element's 4-byte header/overall
			 * slot before possibly writing it */
			desc_ptr += 4;
			if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
			    type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
				continue;
			if (count++ == descriptor) {
				memcpy(desc_ptr, desc, 4);
				/* set select */
				desc_ptr[0] |= 0x80;
				/* clear reserved, just in case */
				desc_ptr[0] &= 0xf0;
			}
		}
	}
	return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
}
/* Refresh the cached page 2 image from the device and return a pointer
 * to the 4-byte status descriptor for component @ecomp, or NULL when
 * the component index is not found.  The returned pointer aliases the
 * cached page2 buffer. */
static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
					       struct enclosure_component *ecomp)
{
	int i, j, count = 0, descriptor = ecomp->number;
	struct scsi_device *sdev = to_scsi_device(edev->edev.parent);
	struct ses_device *ses_dev = edev->scratch;
	unsigned char *type_ptr = ses_dev->page1_types;
	unsigned char *desc_ptr = ses_dev->page2 + 8;

	/* result intentionally ignored: fall through to the (possibly
	 * stale) cached page on read failure */
	ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);

	for (i = 0; i < ses_dev->page1_num_types; i++, type_ptr += 4) {
		for (j = 0; j < type_ptr[1]; j++) {
			desc_ptr += 4;
			if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
			    type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
				continue;
			if (count++ == descriptor)
				return desc_ptr;
		}
	}
	return NULL;
}
/* For device slot and array device slot elements, byte 3 bit 6
 * is "fault sensed" while byte 3 bit 5 is "fault reqstd". As this
 * code stands these bits are shifted 4 positions right so in
 * sysfs they will appear as bits 2 and 1 respectively. Strange. */
static void ses_get_fault(struct enclosure_device *edev,
			  struct enclosure_component *ecomp)
{
	unsigned char *d = ses_get_page2_descriptor(edev, ecomp);

	if (!d)
		return;
	ecomp->fault = (d[3] & 0x60) >> 4;
}
/* Request or clear the fault indicator for @ecomp via a page 2 write.
 * Returns -EINVAL for blink-style settings, which SES does not offer. */
static int ses_set_fault(struct enclosure_device *edev,
			 struct enclosure_component *ecomp,
			 enum enclosure_component_setting val)
{
	unsigned char ctrl[4] = {0};	/* all-zero = disabled */

	if (val == ENCLOSURE_SETTING_ENABLED)
		ctrl[3] = 0x20;
	else if (val != ENCLOSURE_SETTING_DISABLED)
		return -EINVAL;	/* SES doesn't do the SGPIO blink settings */

	return ses_set_page2_descriptor(edev, ecomp, ctrl);
}
/* Read the element status code (low nibble of byte 0) for @ecomp. */
static void ses_get_status(struct enclosure_device *edev,
			   struct enclosure_component *ecomp)
{
	unsigned char *d = ses_get_page2_descriptor(edev, ecomp);

	if (!d)
		return;
	ecomp->status = d[0] & 0x0f;
}
/* Read the locate/identify indicator (byte 2 bit 1) for @ecomp. */
static void ses_get_locate(struct enclosure_device *edev,
			   struct enclosure_component *ecomp)
{
	unsigned char *d = ses_get_page2_descriptor(edev, ecomp);

	if (d)
		ecomp->locate = !!(d[2] & 0x02);
}
/* Request or clear the locate/identify indicator for @ecomp via a
 * page 2 write; blink-style settings are not supported by SES. */
static int ses_set_locate(struct enclosure_device *edev,
			  struct enclosure_component *ecomp,
			  enum enclosure_component_setting val)
{
	unsigned char ctrl[4] = {0};	/* all-zero = disabled */

	if (val == ENCLOSURE_SETTING_ENABLED)
		ctrl[2] = 0x02;
	else if (val != ENCLOSURE_SETTING_DISABLED)
		return -EINVAL;	/* SES doesn't do the SGPIO blink settings */

	return ses_set_page2_descriptor(edev, ecomp, ctrl);
}
/* Request or clear the "device active" indicator for @ecomp, keeping
 * the cached ecomp->active flag in sync with the request. */
static int ses_set_active(struct enclosure_device *edev,
			  struct enclosure_component *ecomp,
			  enum enclosure_component_setting val)
{
	unsigned char ctrl[4] = {0};	/* all-zero = disabled */

	if (val == ENCLOSURE_SETTING_DISABLED) {
		ecomp->active = 0;
	} else if (val == ENCLOSURE_SETTING_ENABLED) {
		ctrl[2] = 0x80;
		ecomp->active = 1;
	} else {
		/* SES doesn't do the SGPIO blink settings */
		return -EINVAL;
	}

	return ses_set_page2_descriptor(edev, ecomp, ctrl);
}
/* hooks the enclosure core uses to query/control SES elements */
static struct enclosure_component_callbacks ses_enclosure_callbacks = {
	.get_fault = ses_get_fault,
	.set_fault = ses_set_fault,
	.get_status = ses_get_status,
	.get_locate = ses_get_locate,
	.set_locate = ses_set_locate,
	.set_active = ses_set_active,
};

/* pairing of a SCSI host with an enclosure found on it; used by the
 * disabled ses_match_host() below */
struct ses_host_edev {
	struct Scsi_Host *shost;
	struct enclosure_device *edev;
};
#if 0
/* currently unused (kept for reference): enclosure_for_each_device()
 * callback matching an enclosure to a given SCSI host */
int ses_match_host(struct enclosure_device *edev, void *data)
{
	struct ses_host_edev *sed = data;
	struct scsi_device *sdev;

	if (!scsi_is_sdev_device(edev->edev.parent))
		return 0;

	sdev = to_scsi_device(edev->edev.parent);

	if (sdev->host != sed->shost)
		return 0;

	sed->edev = edev;
	return 1;
}
#endif /* 0 */
/* Parse one additional element status descriptor (page 10) for @ecomp:
 * record the raw descriptor and, for SAS elements, extract the phy 0
 * SAS address (big-endian).  Non-SAS protocols leave addr == 0. */
static void ses_process_descriptor(struct enclosure_component *ecomp,
				   unsigned char *desc)
{
	int eip = desc[0] & 0x10;	/* element index present bit */
	int invalid = desc[0] & 0x80;
	enum scsi_protocol proto = desc[0] & 0x0f;
	u64 addr = 0;
	struct ses_component *scomp = ecomp->scratch;
	unsigned char *d;

	scomp->desc = desc;

	if (invalid)
		return;

	switch (proto) {
	case SCSI_PROTOCOL_SAS:
		/* skip the 4-byte descriptor header, plus 4 more when an
		 * element index field is present */
		if (eip)
			d = desc + 8;
		else
			d = desc + 4;
		/* only take the phy0 addr */
		addr = (u64)d[12] << 56 |
			(u64)d[13] << 48 |
			(u64)d[14] << 40 |
			(u64)d[15] << 32 |
			(u64)d[16] << 24 |
			(u64)d[17] << 16 |
			(u64)d[18] << 8 |
			(u64)d[19];
		break;
	default:
		/* FIXME: Need to add more protocols than just SAS */
		break;
	}
	scomp->addr = addr;
}
/* search key for ses_enclosure_find_by_addr(): the SAS address to look
 * up and the device to attach on a match */
struct efd {
	u64 addr;
	struct device *dev;
};
/*
 * enclosure_for_each_device() callback: attach efd->dev to the first
 * component of @edev whose cached SAS address matches efd->addr.
 * Returns 1 on a match (stops the iteration), 0 otherwise.
 */
static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
				      void *data)
{
	struct efd *efd = data;
	struct ses_component *scomp;
	int i;

	/* no scratch data means no addresses were parsed for this one */
	if (!edev->component[0].scratch)
		return 0;

	for (i = 0; i < edev->components; i++) {
		scomp = edev->component[i].scratch;
		if (scomp->addr == efd->addr) {
			enclosure_add_device(edev, i, efd->dev);
			return 1;
		}
	}
	return 0;
}
#define INIT_ALLOC_SIZE 32

/* (Re)build component information for @edev: re-read the cached page 10
 * (additional element status), fetch the optional page 7 (element
 * descriptors/names), then walk the page 1 type descriptors.  DEVICE
 * and ARRAY_DEVICE elements are registered with the enclosure core when
 * @create is set, or refreshed in place otherwise. */
static void ses_enclosure_data_process(struct enclosure_device *edev,
				       struct scsi_device *sdev,
				       int create)
{
	u32 result;
	unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL;
	int i, j, page7_len, len, components;
	struct ses_device *ses_dev = edev->scratch;
	int types = ses_dev->page1_num_types;
	unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL);

	if (!hdr_buf)
		goto simple_populate;

	/* re-read page 10 */
	if (ses_dev->page10)
		ses_recv_diag(sdev, 10, ses_dev->page10, ses_dev->page10_len);
	/* Page 7 for the descriptors is optional */
	result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE);
	if (result)
		goto simple_populate;
	page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
	/* add 1 for trailing '\0' we'll use */
	buf = kzalloc(len + 1, GFP_KERNEL);
	if (!buf)
		goto simple_populate;
	result = ses_recv_diag(sdev, 7, buf, len);
	if (result) {
		/* nameless fallback; note the label is also the target of
		 * the allocation-failure gotos above */
 simple_populate:
		kfree(buf);
		buf = NULL;
		desc_ptr = NULL;
		len = 0;
		page7_len = 0;
	} else {
		desc_ptr = buf + 8;
		len = (desc_ptr[2] << 8) + desc_ptr[3];
		/* skip past overall descriptor */
		desc_ptr += len + 4;
	}
	if (ses_dev->page10)
		addl_desc_ptr = ses_dev->page10 + 8;
	type_ptr = ses_dev->page1_types;
	components = 0;
	for (i = 0; i < types; i++, type_ptr += 4) {
		for (j = 0; j < type_ptr[1]; j++) {
			char *name = NULL;
			struct enclosure_component *ecomp;

			if (desc_ptr) {
				if (desc_ptr >= buf + page7_len) {
					desc_ptr = NULL;
				} else {
					len = (desc_ptr[2] << 8) + desc_ptr[3];
					desc_ptr += 4;
					/* Add trailing zero - pushes into
					 * reserved space */
					desc_ptr[len] = '\0';
					name = desc_ptr;
				}
			}
			if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
			    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) {
				if (create)
					ecomp = enclosure_component_register(edev,
									     components++,
									     type_ptr[0],
									     name);
				else
					ecomp = &edev->component[components++];
				if (!IS_ERR(ecomp) && addl_desc_ptr)
					ses_process_descriptor(ecomp,
							       addl_desc_ptr);
			}
			if (desc_ptr)
				desc_ptr += len;
			/* page 10 descriptors self-describe their length */
			if (addl_desc_ptr)
				addl_desc_ptr += addl_desc_ptr[1] + 2;
		}
	}
	kfree(buf);
	kfree(hdr_buf);
}
/*
 * Try to associate @sdev with a slot of enclosure @edev: read the
 * device's Device Identification VPD page (0x83), extract its SAS
 * address, and hand it to ses_enclosure_find_by_addr() for matching.
 *
 * Hardened against malformed device responses: the descriptor walk is
 * bounded so neither the 4-byte descriptor header nor the @len-byte
 * payload can be read past the end of the buffer (vpd_len comes from
 * the device and cannot be trusted).
 */
static void ses_match_to_enclosure(struct enclosure_device *edev,
				   struct scsi_device *sdev)
{
	unsigned char *buf;
	unsigned char *desc;
	unsigned int vpd_len;
	struct efd efd = {
		.addr = 0,
	};

	buf = kmalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
	if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, INIT_ALLOC_SIZE))
		goto free;

	/* refresh the enclosure's component addresses before matching */
	ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);

	/* full page length from the VPD header, then re-read it whole */
	vpd_len = ((buf[2] << 8) | buf[3]) + 4;
	kfree(buf);
	buf = kmalloc(vpd_len, GFP_KERNEL);
	if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, vpd_len))
		goto free;

	desc = buf + 4;
	while (desc + 4 <= buf + vpd_len) {
		enum scsi_protocol proto = desc[0] >> 4;
		u8 code_set = desc[0] & 0x0f;
		u8 piv = desc[1] & 0x80;
		u8 assoc = (desc[1] & 0x30) >> 4;
		u8 type = desc[1] & 0x0f;
		u8 len = desc[3];

		/* malformed descriptor running past the page: stop */
		if (desc + 4 + len > buf + vpd_len)
			break;

		if (piv && code_set == 1 && assoc == 1
		    && proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8)
			efd.addr = (u64)desc[4] << 56 |
				(u64)desc[5] << 48 |
				(u64)desc[6] << 40 |
				(u64)desc[7] << 32 |
				(u64)desc[8] << 24 |
				(u64)desc[9] << 16 |
				(u64)desc[10] << 8 |
				(u64)desc[11];

		desc += len + 4;
	}
	if (!efd.addr)
		goto free;

	efd.dev = &sdev->sdev_gendev;

	enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
 free:
	kfree(buf);
}
/*
 * ses_intf_add - class-interface add hook, called for every SCSI device
 *
 * For a non-enclosure device, try to match it into any enclosure already
 * registered on the same host and return -ENODEV (we do not bind to it).
 * For an enclosure device, read diagnostic pages 1 (configuration),
 * 2 (status) and, if available, 10 (additional element status), register
 * an enclosure_device and populate its components.
 *
 * Fix vs. original: the per-component scratch array is now allocated with
 * kcalloc(), which checks the count * size multiplication for overflow
 * (the component count comes from device-supplied page-1 data).
 */
static int ses_intf_add(struct device *cdev,
			struct class_interface *intf)
{
	struct scsi_device *sdev = to_scsi_device(cdev->parent);
	struct scsi_device *tmp_sdev;
	unsigned char *buf = NULL, *hdr_buf, *type_ptr;
	struct ses_device *ses_dev;
	u32 result;
	int i, types, len, components = 0;
	int err = -ENOMEM;
	int num_enclosures;
	struct enclosure_device *edev;
	struct ses_component *scomp = NULL;

	if (!scsi_device_enclosure(sdev)) {
		/* not an enclosure, but might be in one */
		struct enclosure_device *prev = NULL;

		while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
			ses_match_to_enclosure(edev, sdev);
			prev = edev;
		}
		return -ENODEV;
	}

	/* TYPE_ENCLOSURE prints a message in probe */
	if (sdev->type != TYPE_ENCLOSURE)
		sdev_printk(KERN_NOTICE, sdev, "Embedded Enclosure Device\n");

	ses_dev = kzalloc(sizeof(*ses_dev), GFP_KERNEL);
	hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
	if (!hdr_buf || !ses_dev)
		goto err_init_free;

	/* page 1: configuration — gives us the type descriptor headers */
	result = ses_recv_diag(sdev, 1, hdr_buf, INIT_ALLOC_SIZE);
	if (result)
		goto recv_failed;

	len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		goto err_free;

	result = ses_recv_diag(sdev, 1, buf, len);
	if (result)
		goto recv_failed;

	types = 0;

	/* we always have one main enclosure and the rest are referred
	 * to as secondary subenclosures */
	num_enclosures = buf[1] + 1;

	/* begin at the enclosure descriptor */
	type_ptr = buf + 8;
	/* skip all the enclosure descriptors */
	for (i = 0; i < num_enclosures && type_ptr < buf + len; i++) {
		types += type_ptr[2];
		type_ptr += type_ptr[3] + 4;
	}

	ses_dev->page1_types = type_ptr;
	ses_dev->page1_num_types = types;

	/* count only device-slot components; they are what we expose */
	for (i = 0; i < types && type_ptr < buf + len; i++, type_ptr += 4) {
		if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
		    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
			components += type_ptr[1];
	}
	ses_dev->page1 = buf;
	ses_dev->page1_len = len;
	buf = NULL;

	/* page 2: enclosure status */
	result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE);
	if (result)
		goto recv_failed;

	len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		goto err_free;

	/* make sure getting page 2 actually works */
	result = ses_recv_diag(sdev, 2, buf, len);
	if (result)
		goto recv_failed;
	ses_dev->page2 = buf;
	ses_dev->page2_len = len;
	buf = NULL;

	/* The additional information page --- allows us
	 * to match up the devices */
	result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE);
	if (!result) {
		/* page 10 is optional; absence is not an error */
		len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			goto err_free;

		result = ses_recv_diag(sdev, 10, buf, len);
		if (result)
			goto recv_failed;
		ses_dev->page10 = buf;
		ses_dev->page10_len = len;
		buf = NULL;
	}
	/* kcalloc: overflow-checked allocation of the per-slot scratch */
	scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
	if (!scomp)
		goto err_free;

	edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
				  components, &ses_enclosure_callbacks);
	if (IS_ERR(edev)) {
		err = PTR_ERR(edev);
		goto err_free;
	}

	kfree(hdr_buf);

	edev->scratch = ses_dev;
	for (i = 0; i < components; i++)
		edev->component[i].scratch = scomp + i;

	ses_enclosure_data_process(edev, sdev, 1);

	/* see if there are any devices matching before
	 * we found the enclosure */
	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
			continue;
		ses_match_to_enclosure(edev, tmp_sdev);
	}

	return 0;

 recv_failed:
	sdev_printk(KERN_ERR, sdev, "Failed to get diagnostic page 0x%x\n",
		    result);
	err = -ENODEV;
 err_free:
	kfree(buf);
	kfree(scomp);
	kfree(ses_dev->page10);
	kfree(ses_dev->page2);
	kfree(ses_dev->page1);
 err_init_free:
	kfree(ses_dev);
	kfree(hdr_buf);
	sdev_printk(KERN_ERR, sdev, "Failed to bind enclosure %d\n", err);
	return err;
}
/* Driver-model remove hook: nothing to do, all teardown happens in the
 * class-interface remove callback (ses_intf_remove). */
static int ses_remove(struct device *dev)
{
	return 0;
}
/*
 * ses_intf_remove_component - detach a non-enclosure device from its slot
 *
 * Walk every enclosure on the same host until enclosure_remove_device()
 * reports that @sdev was actually attached to one (returns 0), then drop
 * the reference enclosure_find() took on that enclosure.
 * NOTE(review): presumably enclosure_find(…, prev) releases prev's
 * reference itself — confirm against drivers/misc/enclosure.c.
 */
static void ses_intf_remove_component(struct scsi_device *sdev)
{
	struct enclosure_device *edev, *prev = NULL;

	while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
		prev = edev;
		if (!enclosure_remove_device(edev, &sdev->sdev_gendev))
			break;
	}
	/* edev is non-NULL only when the break above was taken */
	if (edev)
		put_device(&edev->edev);
}
/*
 * ses_intf_remove_enclosure - tear down the enclosure bound to @sdev
 *
 * Frees the cached diagnostic pages and per-component scratch memory,
 * then drops the enclosure_find() reference and unregisters the
 * enclosure from the class.
 */
static void ses_intf_remove_enclosure(struct scsi_device *sdev)
{
	struct enclosure_device *edev;
	struct ses_device *ses_dev;

	/* exact match to this enclosure */
	edev = enclosure_find(&sdev->sdev_gendev, NULL);
	if (!edev)
		return;

	/* detach scratch first so callbacks can no longer see freed pages */
	ses_dev = edev->scratch;
	edev->scratch = NULL;

	kfree(ses_dev->page10);
	kfree(ses_dev->page1);
	kfree(ses_dev->page2);
	kfree(ses_dev);

	/* component[0].scratch is the base of the kcalloc'd scomp array */
	kfree(edev->component[0].scratch);

	put_device(&edev->edev);
	enclosure_unregister(edev);
}
/*
 * Class-interface remove hook: route the device to the appropriate
 * teardown path depending on whether it is itself an enclosure.
 */
static void ses_intf_remove(struct device *cdev,
			    struct class_interface *intf)
{
	struct scsi_device *sdev = to_scsi_device(cdev->parent);

	if (scsi_device_enclosure(sdev))
		ses_intf_remove_enclosure(sdev);
	else
		ses_intf_remove_component(sdev);
}
/* SCSI-device class interface: lets us see every device add/remove so we
 * can bind enclosures and match member devices into slots. */
static struct class_interface ses_interface = {
	.add_dev	= ses_intf_add,
	.remove_dev	= ses_intf_remove,
};
/* Upper-level SCSI driver claiming TYPE_ENCLOSURE devices. */
static struct scsi_driver ses_template = {
	.owner			= THIS_MODULE,
	.gendrv = {
		.name		= "ses",
		.probe		= ses_probe,
		.remove		= ses_remove,
	},
};
/*
 * Module init: register the class interface first so add_dev callbacks
 * are live before the driver starts binding devices; unwind the
 * interface registration if the driver fails to register.
 */
static int __init ses_init(void)
{
	int err = scsi_register_interface(&ses_interface);

	if (err)
		return err;

	err = scsi_register_driver(&ses_template.gendrv);
	if (err)
		scsi_unregister_interface(&ses_interface);

	return err;
}
/* Module exit: unregister in reverse order of registration. */
static void __exit ses_exit(void)
{
	scsi_unregister_driver(&ses_template.gendrv);
	scsi_unregister_interface(&ses_interface);
}
module_init(ses_init);
module_exit(ses_exit);

/* autoload this module whenever a SCSI enclosure device appears */
MODULE_ALIAS_SCSI_DEVICE(TYPE_ENCLOSURE);

MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("SCSI Enclosure Services (ses) driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
anbulang/sctp-cmt | arch/sh/boot/compressed/misc.c | 11326 | 2699 | /*
* arch/sh/boot/compressed/misc.c
*
* This is a collection of several routines from gzip-1.0.3
* adapted for Linux.
*
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
*
* Adapted for SH by Stuart Menefy, Aug 1999
*
* Modified to use standard LinuxSH BIOS by Greg Banks 7Jul2000
*/
#include <asm/uaccess.h>
#include <asm/addrspace.h>
#include <asm/page.h>
/*
 * gzip declarations
 */

/* the decompressor sources below expect STATIC to qualify their symbols */
#define STATIC static

/* this file provides its own freestanding memset/memcpy (below) */
#undef memset
#undef memcpy
#define memzero(s, n)     memset ((s), 0, (n))

/* cache.c */
#define CACHE_ENABLE      0
#define CACHE_DISABLE     1
int cache_control(unsigned int command);

/* compressed payload, appended by the build system */
extern char input_data[];
extern int input_len;
static unsigned char *output;

static void error(char *m);

int puts(const char *);

extern int _text;		/* Defined in vmlinux.lds.S */
extern int _end;
/* bump-allocator bounds used by the decompressor's malloc */
static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;

/* bzip2 needs a much larger work area than the other decompressors */
#ifdef CONFIG_HAVE_KERNEL_BZIP2
#define HEAP_SIZE	0x400000
#else
#define HEAP_SIZE	0x10000
#endif

/* pull in exactly one decompressor implementation, chosen at config time */
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif

#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif

#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif

#ifdef CONFIG_KERNEL_XZ
#include "../../../../lib/decompress_unxz.c"
#endif

#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif
/* Console output stub for the decompressor; currently discards @s. */
int puts(const char *s)
{
	/* This should be updated to use the sh-sci routines */
	return 0;
}
/*
 * Freestanding memset for the decompressor environment: fill the first
 * @n bytes of @s with (the low byte of) @c and return @s, matching the
 * C library contract.
 *
 * Fix vs. original: the loop index is size_t, not int, so the
 * signed/unsigned comparison against @n (and truncation for very large
 * fills) is gone.
 */
void* memset(void* s, int c, size_t n)
{
	size_t i;
	char *ss = (char *)s;

	for (i = 0; i < n; i++)
		ss[i] = c;
	return s;
}
/*
 * Freestanding memcpy for the decompressor environment: copy @__n bytes
 * from @__src to @__dest (regions must not overlap) and return @__dest.
 *
 * Fix vs. original: size_t loop index (no signed/unsigned mismatch with
 * @__n) and a const-qualified source cursor instead of casting the
 * const away.
 */
void* memcpy(void* __dest, __const void* __src,
			size_t __n)
{
	size_t i;
	char *d = (char *)__dest;
	const char *s = (const char *)__src;

	for (i = 0; i < __n; i++)
		d[i] = s[i];
	return __dest;
}
/*
 * Fatal-error handler for the decompressor: print the message and halt
 * the CPU in an infinite loop — there is nothing to return to.
 */
static void error(char *x)
{
	puts("\n\n");
	puts(x);
	puts("\n\n -- System halted");

	for (;;)
		;	/* halt */
}
/* stack alignment required by the ABI: 8 bytes on sh64, 4 on sh32 */
#ifdef CONFIG_SUPERH64
#define stackalign	8
#else
#define stackalign	4
#endif

#define STACK_SIZE (4096)
/* private stack for the decompressor; entry code loads *stack_start,
 * which points one past the end (stacks grow downward) */
long __attribute__ ((aligned(stackalign))) user_stack[STACK_SIZE];
long *stack_start = &user_stack[STACK_SIZE];
/*
 * Entry point from head.S: pick the physical load address for the
 * uncompressed kernel, set up the decompressor heap after _end, and
 * run the config-selected decompress() with caches enabled.
 */
void decompress_kernel(void)
{
	unsigned long output_addr;

#ifdef CONFIG_SUPERH64
	/* sh64: fixed offset above the start of memory */
	output_addr = (CONFIG_MEMORY_START + 0x2000);
#else
	/* sh32: one page past the compressed image's text */
	output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
#if defined(CONFIG_29BIT)
	/* 29-bit mode: write through the uncached P2 segment */
	output_addr |= P2SEG;
#endif
#endif

	output = (unsigned char *)output_addr;
	free_mem_ptr = (unsigned long)&_end;
	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;

	puts("Uncompressing Linux... ");
	cache_control(CACHE_ENABLE);
	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
	cache_control(CACHE_DISABLE);
	puts("Ok, booting the kernel.\n");
}
| gpl-2.0 |
ARMP/bproj-black | drivers/infiniband/hw/cxgb3/iwch_cq.c | 14142 | 5827 | /*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "iwch_provider.h"
#include "iwch.h"
/*
* Get one cq entry from cxio and map it to openib.
*
* Returns:
* 0 EMPTY;
* 1 cqe returned
* -EAGAIN caller must try again
* any other -errno fatal error
*/
/*
 * iwch_poll_cq_one - consume one CQE from the HW CQ and translate it
 * into an ib_wc. Return contract is described in the comment above:
 * 0 empty, 1 wc filled, -EAGAIN caller must re-poll, other -errno fatal.
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
			    struct ib_wc *wc)
{
	struct iwch_qp *qhp = NULL;
	struct t3_cqe cqe, *rd_cqe;
	struct t3_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie;
	int ret = 1;

	rd_cqe = cxio_next_cqe(&chp->cq);

	if (!rd_cqe)
		return 0;

	/* look up the owning QP; it may already have been destroyed */
	qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		/* hold the QP lock across the poll so the WQ state is stable */
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
				   &credit);
	/* T3A parts need explicit CQ credit returns */
	if (t3a_device(chp->rhp) && credit) {
		PDBG("%s updating %d cq credits on id %d\n", __func__,
		     credit, chp->cq.cqid);
		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
	}

	/* non-zero: CQE consumed but no completion to report — re-poll */
	if (ret) {
		ret = -EAGAIN;
		goto out;
	}
	ret = 1;

	wc->wr_id = cookie;
	/* NOTE(review): qhp can be NULL here (QP gone); &qhp->ibqp is then
	 * computed from a NULL base — pre-existing behavior, confirm against
	 * upstream iw_cxgb3 before relying on wc->qp in that case. */
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__,
	     CQE_QPID(cqe), CQE_TYPE(cqe),
	     CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
	     CQE_WRID_LOW(cqe), (unsigned long long) cookie);

	/* CQE_TYPE 0 = receive completion, 1 = send-side completion */
	if (CQE_TYPE(cqe) == 0) {
		if (!CQE_STATUS(cqe))
			wc->byte_len = CQE_LEN(cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
		    CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		/* map the T3 opcode onto the generic verbs opcode */
		switch (CQE_OPCODE(cqe)) {
		case T3_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case T3_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(cqe);
			break;
		case T3_SEND:
		case T3_SEND_WITH_SE:
		case T3_SEND_WITH_INV:
		case T3_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			break;
		case T3_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;

		case T3_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case T3_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	/* map the T3 completion status onto the generic verbs status */
	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(cqe)) {
		case TPT_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case TPT_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case TPT_ERR_QPID:
		case TPT_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case TPT_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case TPT_ERR_INVALIDATE_SHARED_MR:
		case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case TPT_ERR_CRC:
		case TPT_ERR_MARKER:
		case TPT_ERR_PDU_LEN_ERR:
		case TPT_ERR_OUT_OF_RQE:
		case TPT_ERR_DDP_VERSION:
		case TPT_ERR_RDMA_VERSION:
		case TPT_ERR_DDP_QUEUE_NUM:
		case TPT_ERR_MSN:
		case TPT_ERR_TBIT:
		case TPT_ERR_MO:
		case TPT_ERR_MSN_RANGE:
		case TPT_ERR_IRD_OVERFLOW:
		case TPT_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case TPT_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected cqe_status 0x%x for "
			       "QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}
/*
 * iwch_poll_cq - verbs poll_cq entry point.
 * Fills up to @num_entries work completions into @wc and returns the
 * number polled, or a negative errno on a fatal poll error.
 */
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct iwch_cq *chp = to_iwch_cq(ibcq);
	struct iwch_dev *rhp = chp->rhp;
	unsigned long flags;
	int polled = 0;
	int err = 0;

	spin_lock_irqsave(&chp->lock, flags);
	while (polled < num_entries) {
#ifdef DEBUG
		int loops = 0;
#endif

		/*
		 * Because T3 can post CQEs that are _not_ associated
		 * with a WR, we might have to poll again after removing
		 * one of these.
		 */
		do {
			err = iwch_poll_cq_one(rhp, chp, wc + polled);
#ifdef DEBUG
			BUG_ON(++loops > 1000);
#endif
		} while (err == -EAGAIN);
		if (err <= 0)
			break;
		++polled;
	}
	spin_unlock_irqrestore(&chp->lock, flags);

	return err < 0 ? err : polled;
}
| gpl-2.0 |
sdfd/FPGA_Linux | drivers/net/wireless/ath/ath9k/debug.c | 63 | 41274 | /*
* Copyright (c) 2008-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include "ath9k.h"
/* debugfs-only raw register accessors, bypassing the usual REG_* layers */
#define REG_WRITE_D(_ah, _reg, _val) \
	ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg))
#define REG_READ_D(_ah, _reg) \
	ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
/*
 * ath9k_debug_sync_cause - bump one debug counter per bit set in the
 * interrupt SYNC_CAUSE register value (plus an overall counter).
 */
void ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
{
	if (sync_cause)
		sc->debug.stats.istats.sync_cause_all++;
	if (sync_cause & AR_INTR_SYNC_RTC_IRQ)
		sc->debug.stats.istats.sync_rtc_irq++;
	if (sync_cause & AR_INTR_SYNC_MAC_IRQ)
		sc->debug.stats.istats.sync_mac_irq++;
	if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS)
		sc->debug.stats.istats.eeprom_illegal_access++;
	if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT)
		sc->debug.stats.istats.apb_timeout++;
	if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT)
		sc->debug.stats.istats.pci_mode_conflict++;
	if (sync_cause & AR_INTR_SYNC_HOST1_FATAL)
		sc->debug.stats.istats.host1_fatal++;
	if (sync_cause & AR_INTR_SYNC_HOST1_PERR)
		sc->debug.stats.istats.host1_perr++;
	if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR)
		sc->debug.stats.istats.trcv_fifo_perr++;
	if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP)
		sc->debug.stats.istats.radm_cpl_ep++;
	if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT)
		sc->debug.stats.istats.radm_cpl_dllp_abort++;
	if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT)
		sc->debug.stats.istats.radm_cpl_tlp_abort++;
	if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR)
		sc->debug.stats.istats.radm_cpl_ecrc_err++;
	if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT)
		sc->debug.stats.istats.radm_cpl_timeout++;
	if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
		sc->debug.stats.istats.local_timeout++;
	if (sync_cause & AR_INTR_SYNC_PM_ACCESS)
		sc->debug.stats.istats.pm_access++;
	if (sync_cause & AR_INTR_SYNC_MAC_AWAKE)
		sc->debug.stats.istats.mac_awake++;
	if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP)
		sc->debug.stats.istats.mac_asleep++;
	if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS)
		sc->debug.stats.istats.mac_sleep_access++;
}
/*
 * Generic debugfs read helper: private_data holds a NUL-terminated
 * buffer that is simply streamed out to userspace.
 */
static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	char *data = file->private_data;
	size_t len = strlen(data);

	return simple_read_from_buffer(user_buf, count, ppos, data, len);
}
/* Release hook paired with vmalloc'd debugfs buffers: free on close. */
static int ath9k_debugfs_release_buf(struct inode *inode, struct file *file)
{
	vfree(file->private_data);
	return 0;
}
#ifdef CONFIG_ATH_DEBUG
/* debugfs "debug" read: report the current debug mask in hex. */
static ssize_t read_file_debug(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	char tmp[32];
	unsigned int n = sprintf(tmp, "0x%08x\n", common->debug_mask);

	return simple_read_from_buffer(user_buf, count, ppos, tmp, n);
}
/* debugfs "debug" write: parse a numeric mask and install it. */
static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	unsigned long mask;
	char kbuf[32];
	ssize_t n = min(count, sizeof(kbuf) - 1);

	if (copy_from_user(kbuf, user_buf, n))
		return -EFAULT;

	kbuf[n] = '\0';
	if (kstrtoul(kbuf, 0, &mask))
		return -EINVAL;

	common->debug_mask = mask;
	return count;
}
/* file_operations for the "debug" debugfs node (read/write mask) */
static const struct file_operations fops_debug = {
	.read = read_file_debug,
	.write = write_file_debug,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
#endif
#define DMA_BUF_LEN 1024
/*
 * read_file_ani - debugfs read handler dumping ANI (Adaptive Noise
 * Immunity) state and event counters as a name/value table.
 *
 * Fix vs. original: the "SPUR DOWN" row printed ast_ani_spurup (a
 * copy-paste of the "SPUR UP" row); it now reports ast_ani_spurdown.
 */
static ssize_t read_file_ani(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	unsigned int len = 0;
	const unsigned int size = 1024;
	ssize_t retval = 0;
	char *buf;
	int i;
	struct {
		const char *name;
		unsigned int val;
	} ani_info[] = {
		{ "ANI RESET", ah->stats.ast_ani_reset },
		{ "OFDM LEVEL", ah->ani.ofdmNoiseImmunityLevel },
		{ "CCK LEVEL", ah->ani.cckNoiseImmunityLevel },
		{ "SPUR UP", ah->stats.ast_ani_spurup },
		{ "SPUR DOWN", ah->stats.ast_ani_spurdown },
		{ "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon },
		{ "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff },
		{ "MRC-CCK ON", ah->stats.ast_ani_ccklow },
		{ "MRC-CCK OFF", ah->stats.ast_ani_cckhigh },
		{ "FIR-STEP UP", ah->stats.ast_ani_stepup },
		{ "FIR-STEP DOWN", ah->stats.ast_ani_stepdown },
		{ "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero },
		{ "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs },
		{ "CCK ERRORS", ah->stats.ast_ani_cckerrs },
	};

	buf = kzalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	len += scnprintf(buf + len, size - len, "%15s: %s\n", "ANI",
			 common->disable_ani ? "DISABLED" : "ENABLED");

	/* when ANI is disabled the counters are not meaningful — stop here */
	if (common->disable_ani)
		goto exit;

	for (i = 0; i < ARRAY_SIZE(ani_info); i++)
		len += scnprintf(buf + len, size - len, "%15s: %u\n",
				 ani_info[i].name, ani_info[i].val);

exit:
	if (len > size)
		len = size;

	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);

	return retval;
}
static ssize_t write_file_ani(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
unsigned long ani;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (kstrtoul(buf, 0, &ani))
return -EINVAL;
if (ani < 0 || ani > 1)
return -EINVAL;
common->disable_ani = !ani;
if (common->disable_ani) {
clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
ath_stop_ani(sc);
} else {
ath_check_ani(sc);
}
return count;
}
/* file_operations for the "ani" debugfs node (dump stats / toggle) */
static const struct file_operations fops_ani = {
	.read = read_file_ani,
	.write = write_file_ani,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
/* debugfs read: report whether WLAN/BT RX antenna diversity is enabled. */
static ssize_t read_file_bt_ant_diversity(struct file *file,
					  char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	char tmp[32];
	unsigned int n = sprintf(tmp, "%d\n", common->bt_ant_diversity);

	return simple_read_from_buffer(user_buf, count, ppos, tmp, n);
}
/*
 * write_file_bt_ant_diversity - enable/disable WLAN/BT RX antenna
 * diversity (any non-zero value enables it) and push the setting to hw.
 */
static ssize_t write_file_bt_ant_diversity(struct file *file,
					   const char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_hw_capabilities *pCap = &sc->sc_ah->caps;
	unsigned long bt_ant_diversity;
	char buf[32];
	ssize_t len;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	/* hardware without BT antenna diversity: silently accept the write
	 * without doing anything (count is still returned) */
	if (!(pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
		goto exit;

	buf[len] = '\0';
	if (kstrtoul(buf, 0, &bt_ant_diversity))
		return -EINVAL;

	common->bt_ant_diversity = !!bt_ant_diversity;
	/* chip must be awake while poking diversity registers */
	ath9k_ps_wakeup(sc);
	ath9k_hw_set_bt_ant_diversity(sc->sc_ah, common->bt_ant_diversity);
	ath_dbg(common, CONFIG, "Enable WLAN/BT RX Antenna diversity: %d\n",
		common->bt_ant_diversity);
	ath9k_ps_restore(sc);
exit:
	return count;
}
/* file_operations for the "bt_ant_diversity" debugfs node */
static const struct file_operations fops_bt_ant_diversity = {
	.read = read_file_bt_ant_diversity,
	.write = write_file_bt_ant_diversity,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
#endif
/*
 * ath9k_debug_stat_ant - record one antenna-diversity decision:
 * count the LNA configuration attempted on each chain and remember
 * the latest averaged RSSI values for main and alt antennas.
 */
void ath9k_debug_stat_ant(struct ath_softc *sc,
			  struct ath_hw_antcomb_conf *div_ant_conf,
			  int main_rssi_avg, int alt_rssi_avg)
{
	struct ath_antenna_stats *stats_main = &sc->debug.stats.ant_stats[ANT_MAIN];
	struct ath_antenna_stats *stats_alt = &sc->debug.stats.ant_stats[ANT_ALT];

	stats_main->lna_attempt_cnt[div_ant_conf->main_lna_conf]++;
	stats_alt->lna_attempt_cnt[div_ant_conf->alt_lna_conf]++;

	stats_main->rssi_avg = main_rssi_avg;
	stats_alt->rssi_avg = alt_rssi_avg;
}
/*
 * read_file_antenna_diversity - debugfs read handler dumping the current
 * LNA configuration, averaged RSSI and the per-LNA receive/attempt
 * counters collected by ath9k_debug_stat_ant().
 */
static ssize_t read_file_antenna_diversity(struct file *file,
					   char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
	struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
	struct ath_hw_antcomb_conf div_ant_conf;
	unsigned int len = 0;
	const unsigned int size = 1024;
	ssize_t retval = 0;
	char *buf;
	/* indexed by the ATH_ANT_DIV_COMB_* LNA configuration values */
	static const char *lna_conf_str[4] = {
		"LNA1_MINUS_LNA2", "LNA2", "LNA1", "LNA1_PLUS_LNA2"
	};

	buf = kzalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
		len += scnprintf(buf + len, size - len, "%s\n",
				 "Antenna Diversity Combining is disabled");
		goto exit;
	}

	/* wake the chip to read the live antenna configuration */
	ath9k_ps_wakeup(sc);
	ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
	len += scnprintf(buf + len, size - len, "Current MAIN config : %s\n",
			 lna_conf_str[div_ant_conf.main_lna_conf]);
	len += scnprintf(buf + len, size - len, "Current ALT config  : %s\n",
			 lna_conf_str[div_ant_conf.alt_lna_conf]);
	len += scnprintf(buf + len, size - len, "Average MAIN RSSI   : %d\n",
			 as_main->rssi_avg);
	len += scnprintf(buf + len, size - len, "Average ALT RSSI    : %d\n\n",
			 as_alt->rssi_avg);
	ath9k_ps_restore(sc);

	len += scnprintf(buf + len, size - len, "Packet Receive Cnt:\n");
	len += scnprintf(buf + len, size - len, "-------------------\n");
	len += scnprintf(buf + len, size - len, "%30s%15s\n",
			 "MAIN", "ALT");
	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
			 "TOTAL COUNT",
			 as_main->recv_cnt,
			 as_alt->recv_cnt);
	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
			 "LNA1",
			 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
			 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
			 "LNA2",
			 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
			 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
			 "LNA1 + LNA2",
			 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
			 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
			 "LNA1 - LNA2",
			 as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
			 as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);

	len += scnprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
	len += scnprintf(buf + len, size - len, "--------------------\n");
	len += scnprintf(buf + len, size - len, "%30s%15s\n",
			 "MAIN", "ALT");
	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
			 "LNA1",
			 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
			 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
			 "LNA2",
			 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
			 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
			 "LNA1 + LNA2",
			 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
			 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
	len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
			 "LNA1 - LNA2",
			 as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
			 as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);

exit:
	if (len > size)
		len = size;

	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);

	return retval;
}
/* file_operations for the read-only "antenna_diversity" debugfs node */
static const struct file_operations fops_antenna_diversity = {
	.read = read_file_antenna_diversity,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
/*
 * read_file_dma - debugfs read handler dumping the raw MAC DMA debug
 * observation registers plus a decoded per-QCU/DCU state table.
 *
 * Fix vs. original: the per-queue fsp_st column computed
 * "val[2] & (0x7 << (i * 3)) >> (i * 3)" — because the shift operators
 * bind tighter than '&', that always reduced to "val[2] & 0x7".
 * The mask-then-shift is now explicitly parenthesized so each queue's
 * own 3-bit field is extracted.
 */
static ssize_t read_file_dma(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	struct ath_hw *ah = sc->sc_ah;
	char *buf;
	int retval;
	unsigned int len = 0;
	u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
	int i, qcuOffset = 0, dcuOffset = 0;
	u32 *qcuBase = &val[0], *dcuBase = &val[4];

	buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ath9k_ps_wakeup(sc);

	/* route the DMA observation bus onto the debug registers */
	REG_WRITE_D(ah, AR_MACMISC,
		  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
		   (AR_MACMISC_MISC_OBS_BUS_1 <<
		    AR_MACMISC_MISC_OBS_BUS_MSB_S)));

	len += scnprintf(buf + len, DMA_BUF_LEN - len,
			 "Raw DMA Debug values:\n");

	for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
		if (i % 4 == 0)
			len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");

		val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
		len += scnprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
				 i, val[i]);
	}

	len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
	len += scnprintf(buf + len, DMA_BUF_LEN - len,
			 "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");

	/* QCU fields are 4 bits wide (8 per register), DCU fields 5 bits */
	for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
		if (i == 8) {
			qcuOffset = 0;
			qcuBase++;
		}

		if (i == 6) {
			dcuOffset = 0;
			dcuBase++;
		}

		len += scnprintf(buf + len, DMA_BUF_LEN - len,
				 "%2d          %2x      %1x     %2x           %2x\n",
				 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
				 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
				 (val[2] & (0x7 << (i * 3))) >> (i * 3),
				 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
	}

	len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");

	len += scnprintf(buf + len, DMA_BUF_LEN - len,
			 "qcu_stitch state:   %2x    qcu_fetch state:        %2x\n",
			 (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
	len += scnprintf(buf + len, DMA_BUF_LEN - len,
			 "qcu_complete state: %2x    dcu_complete state:     %2x\n",
			 (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
	len += scnprintf(buf + len, DMA_BUF_LEN - len,
			 "dcu_arb state:      %2x    dcu_fp state:           %2x\n",
			 (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
	len += scnprintf(buf + len, DMA_BUF_LEN - len,
			 "chan_idle_dur:     %3d    chan_idle_dur_valid:     %1d\n",
			 (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
	len += scnprintf(buf + len, DMA_BUF_LEN - len,
			 "txfifo_valid_0:      %1d    txfifo_valid_1:          %1d\n",
			 (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
	len += scnprintf(buf + len, DMA_BUF_LEN - len,
			 "txfifo_dcu_num_0:   %2d    txfifo_dcu_num_1:       %2d\n",
			 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);

	len += scnprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
			 REG_READ_D(ah, AR_OBS_BUS_1));
	len += scnprintf(buf + len, DMA_BUF_LEN - len,
			 "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));

	ath9k_ps_restore(sc);

	if (len > DMA_BUF_LEN)
		len = DMA_BUF_LEN;

	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);

	return retval;
}
/* file_operations for the read-only "dma" debugfs node */
static const struct file_operations fops_dma = {
	.read = read_file_dma,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
/*
 * ath_debug_stat_interrupt - bump one debug counter per interrupt cause
 * bit in @status. EDMA chips report RX via RXLP/RXHP; legacy via RX.
 */
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
{
	if (status)
		sc->debug.stats.istats.total++;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (status & ATH9K_INT_RXLP)
			sc->debug.stats.istats.rxlp++;
		if (status & ATH9K_INT_RXHP)
			sc->debug.stats.istats.rxhp++;
		if (status & ATH9K_INT_BB_WATCHDOG)
			sc->debug.stats.istats.bb_watchdog++;
	} else {
		if (status & ATH9K_INT_RX)
			sc->debug.stats.istats.rxok++;
	}
	if (status & ATH9K_INT_RXEOL)
		sc->debug.stats.istats.rxeol++;
	if (status & ATH9K_INT_RXORN)
		sc->debug.stats.istats.rxorn++;
	if (status & ATH9K_INT_TX)
		sc->debug.stats.istats.txok++;
	if (status & ATH9K_INT_TXURN)
		sc->debug.stats.istats.txurn++;
	if (status & ATH9K_INT_RXPHY)
		sc->debug.stats.istats.rxphyerr++;
	if (status & ATH9K_INT_RXKCM)
		sc->debug.stats.istats.rx_keycache_miss++;
	if (status & ATH9K_INT_SWBA)
		sc->debug.stats.istats.swba++;
	if (status & ATH9K_INT_BMISS)
		sc->debug.stats.istats.bmiss++;
	if (status & ATH9K_INT_BNR)
		sc->debug.stats.istats.bnr++;
	if (status & ATH9K_INT_CST)
		sc->debug.stats.istats.cst++;
	if (status & ATH9K_INT_GTT)
		sc->debug.stats.istats.gtt++;
	if (status & ATH9K_INT_TIM)
		sc->debug.stats.istats.tim++;
	if (status & ATH9K_INT_CABEND)
		sc->debug.stats.istats.cabend++;
	if (status & ATH9K_INT_DTIMSYNC)
		sc->debug.stats.istats.dtimsync++;
	if (status & ATH9K_INT_DTIM)
		sc->debug.stats.istats.dtim++;
	if (status & ATH9K_INT_TSFOOR)
		sc->debug.stats.istats.tsfoor++;
	if (status & ATH9K_INT_MCI)
		sc->debug.stats.istats.mci++;
	if (status & ATH9K_INT_GENTIMER)
		sc->debug.stats.istats.gen_timer++;
}
/*
 * read_file_interrupt - debugfs read handler dumping every interrupt
 * counter maintained by ath_debug_stat_interrupt() and
 * ath9k_debug_sync_cause() as a name/value table.
 */
static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	unsigned int len = 0;
	int rv;
	int mxlen = 4000;
	char *buf = kmalloc(mxlen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

/* helper: append one "name: count" row from sc->debug.stats.istats */
#define PR_IS(a, s)						\
	do {							\
		len += scnprintf(buf + len, mxlen - len,	\
				 "%21s: %10u\n", a,		\
				 sc->debug.stats.istats.s);	\
	} while (0)

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		PR_IS("RXLP", rxlp);
		PR_IS("RXHP", rxhp);
		PR_IS("WATHDOG", bb_watchdog);
	} else {
		PR_IS("RX", rxok);
	}
	PR_IS("RXEOL", rxeol);
	PR_IS("RXORN", rxorn);
	PR_IS("TX", txok);
	PR_IS("TXURN", txurn);
	PR_IS("MIB", mib);
	PR_IS("RXPHY", rxphyerr);
	PR_IS("RXKCM", rx_keycache_miss);
	PR_IS("SWBA", swba);
	PR_IS("BMISS", bmiss);
	PR_IS("BNR", bnr);
	PR_IS("CST", cst);
	PR_IS("GTT", gtt);
	PR_IS("TIM", tim);
	PR_IS("CABEND", cabend);
	PR_IS("DTIMSYNC", dtimsync);
	PR_IS("DTIM", dtim);
	PR_IS("TSFOOR", tsfoor);
	PR_IS("MCI", mci);
	PR_IS("GENTIMER", gen_timer);
	PR_IS("TOTAL", total);

	len += scnprintf(buf + len, mxlen - len,
			 "SYNC_CAUSE stats:\n");

	PR_IS("Sync-All", sync_cause_all);
	PR_IS("RTC-IRQ", sync_rtc_irq);
	PR_IS("MAC-IRQ", sync_mac_irq);
	PR_IS("EEPROM-Illegal-Access", eeprom_illegal_access);
	PR_IS("APB-Timeout", apb_timeout);
	PR_IS("PCI-Mode-Conflict", pci_mode_conflict);
	PR_IS("HOST1-Fatal", host1_fatal);
	PR_IS("HOST1-Perr", host1_perr);
	PR_IS("TRCV-FIFO-Perr", trcv_fifo_perr);
	PR_IS("RADM-CPL-EP", radm_cpl_ep);
	PR_IS("RADM-CPL-DLLP-Abort", radm_cpl_dllp_abort);
	PR_IS("RADM-CPL-TLP-Abort", radm_cpl_tlp_abort);
	PR_IS("RADM-CPL-ECRC-Err", radm_cpl_ecrc_err);
	PR_IS("RADM-CPL-Timeout", radm_cpl_timeout);
	PR_IS("Local-Bus-Timeout", local_timeout);
	PR_IS("PM-Access", pm_access);
	PR_IS("MAC-Awake", mac_awake);
	PR_IS("MAC-Asleep", mac_asleep);
	PR_IS("MAC-Sleep-Access", mac_sleep_access);

	if (len > mxlen)
		len = mxlen;

	rv = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return rv;
}
/* file_operations for the read-only "interrupt" debugfs node */
static const struct file_operations fops_interrupt = {
	.read = read_file_interrupt,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
/*
 * read_file_xmit - debugfs read handler dumping the per-AC (BE/BK/VI/VO)
 * transmit statistics. The PR() macro (from debug.h) appends one row of
 * four counters per call.
 */
static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	char *buf;
	unsigned int len = 0, size = 2048;
	ssize_t retval = 0;

	buf = kzalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* column header row (len is 0 here, so writing at buf is fine) */
	len += sprintf(buf, "%30s %10s%10s%10s\n\n",
		       "BE", "BK", "VI", "VO");

	PR("MPDUs Queued:    ", queued);
	PR("MPDUs Completed: ", completed);
	PR("MPDUs XRetried:  ", xretries);
	PR("Aggregates:      ", a_aggr);
	PR("AMPDUs Queued HW:", a_queued_hw);
	PR("AMPDUs Queued SW:", a_queued_sw);
	PR("AMPDUs Completed:", a_completed);
	PR("AMPDUs Retried:  ", a_retries);
	PR("AMPDUs XRetried: ", a_xretries);
	PR("TXERR Filtered:  ", txerr_filtered);
	PR("FIFO Underrun:   ", fifo_underrun);
	PR("TXOP Exceeded:   ", xtxop);
	PR("TXTIMER Expiry:  ", timer_exp);
	PR("DESC CFG Error:  ", desc_cfg_err);
	PR("DATA Underrun:   ", data_underrun);
	PR("DELIM Underrun:  ", delim_underrun);
	PR("TX-Pkts-All:     ", tx_pkts_all);
	PR("TX-Bytes-All:    ", tx_bytes_all);
	PR("HW-put-tx-buf:   ", puttxbuf);
	PR("HW-tx-start:     ", txstart);
	PR("HW-tx-proc-desc: ", txprocdesc);
	PR("TX-Failed:       ", txfailed);

	if (len > size)
		len = size;

	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);

	return retval;
}
/*
 * print_queue - format one TX queue's state into buf.
 *
 * Snapshots the queue counters under the queue lock so the values are
 * consistent with each other. Returns the number of characters written.
 */
static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
			   char *buf, ssize_t size)
{
	ssize_t written;

	ath_txq_lock(sc, txq);
	written = scnprintf(buf, size,
			    "qnum: %d qdepth: %2d ampdu-depth: %2d pending: %3d stopped: %d\n",
			    txq->axq_qnum, txq->axq_depth,
			    txq->axq_ampdu_depth, txq->pending_frames,
			    txq->stopped);
	ath_txq_unlock(sc, txq);

	return written;
}
/*
 * read_file_queues - debugfs read handler for the "queues" file.
 *
 * Emits one line per access category plus one for the CAB (content
 * after beacon) queue. Returns bytes copied to user space or a
 * negative error code.
 */
static ssize_t read_file_queues(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	/* Names indexed by IEEE80211_AC_* (VO=0, VI=1, BE=2, BK=3). */
	static const char *qname[4] = {
		"VO", "VI", "BE", "BK"
	};
	struct ath_softc *sc = file->private_data;
	struct ath_txq *txq;
	const unsigned int size = 1024;
	unsigned int len = 0;
	ssize_t retval = 0;
	char *buf;
	int ac;

	buf = kzalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		txq = sc->tx.txq_map[ac];
		len += scnprintf(buf + len, size - len, "(%s): ", qname[ac]);
		len += print_queue(sc, txq, buf + len, size - len);
	}

	len += scnprintf(buf + len, size - len, "(CAB): ");
	len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);

	if (len > size)
		len = size;

	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);

	return retval;
}
/*
 * read_file_misc - debugfs read handler for the "misc" file.
 *
 * Reports BSSID, opmode, the decoded RX filter, the decoded interrupt
 * mask and the per-type VIF counts. Flag names are rendered from
 * lookup tables; entry order matches the original output exactly.
 */
static ssize_t read_file_misc(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	static const struct {
		u32 mask;
		const char *name;
	} rxf_bits[] = {
		{ ATH9K_RX_FILTER_UCAST,           "UCAST" },
		{ ATH9K_RX_FILTER_MCAST,           "MCAST" },
		{ ATH9K_RX_FILTER_BCAST,           "BCAST" },
		{ ATH9K_RX_FILTER_CONTROL,         "CONTROL" },
		{ ATH9K_RX_FILTER_BEACON,          "BEACON" },
		{ ATH9K_RX_FILTER_PROM,            "PROM" },
		{ ATH9K_RX_FILTER_PROBEREQ,        "PROBEREQ" },
		{ ATH9K_RX_FILTER_PHYERR,          "PHYERR" },
		{ ATH9K_RX_FILTER_MYBEACON,        "MYBEACON" },
		{ ATH9K_RX_FILTER_COMP_BAR,        "COMP_BAR" },
		{ ATH9K_RX_FILTER_PSPOLL,          "PSPOLL" },
		{ ATH9K_RX_FILTER_PHYRADAR,        "PHYRADAR" },
		{ ATH9K_RX_FILTER_MCAST_BCAST_ALL, "MCAST_BCAST_ALL" },
		{ ATH9K_RX_FILTER_CONTROL_WRAPPER, "CONTROL_WRAPPER" },
	}, imask_bits[] = {
		{ ATH9K_INT_SWBA,        "SWBA" },
		{ ATH9K_INT_BMISS,       "BMISS" },
		{ ATH9K_INT_CST,         "CST" },
		{ ATH9K_INT_RX,          "RX" },
		{ ATH9K_INT_RXHP,        "RXHP" },
		{ ATH9K_INT_RXLP,        "RXLP" },
		{ ATH9K_INT_BB_WATCHDOG, "BB_WATCHDOG" },
	};
	struct ath_softc *sc = file->private_data;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ath9k_vif_iter_data iter_data;
	char buf[512];
	unsigned int len = 0;
	ssize_t retval = 0;
	unsigned int reg;
	u32 rxfilter;
	size_t i;

	len += scnprintf(buf + len, sizeof(buf) - len,
			 "BSSID: %pM\n", common->curbssid);
	len += scnprintf(buf + len, sizeof(buf) - len,
			 "BSSID-MASK: %pM\n", common->bssidmask);
	len += scnprintf(buf + len, sizeof(buf) - len,
			 "OPMODE: %s\n",
			 ath_opmode_to_string(sc->sc_ah->opmode));

	/* The RX filter lives in hardware; wake the chip to read it. */
	ath9k_ps_wakeup(sc);
	rxfilter = ath9k_hw_getrxfilter(sc->sc_ah);
	ath9k_ps_restore(sc);

	len += scnprintf(buf + len, sizeof(buf) - len,
			 "RXFILTER: 0x%x", rxfilter);
	for (i = 0; i < ARRAY_SIZE(rxf_bits); i++)
		if (rxfilter & rxf_bits[i].mask)
			len += scnprintf(buf + len, sizeof(buf) - len,
					 " %s", rxf_bits[i].name);
	len += scnprintf(buf + len, sizeof(buf) - len, "\n");

	reg = sc->sc_ah->imask;
	len += scnprintf(buf + len, sizeof(buf) - len,
			 "INTERRUPT-MASK: 0x%x", reg);
	for (i = 0; i < ARRAY_SIZE(imask_bits); i++)
		if (reg & imask_bits[i].mask)
			len += scnprintf(buf + len, sizeof(buf) - len,
					 " %s", imask_bits[i].name);
	len += scnprintf(buf + len, sizeof(buf) - len, "\n");

	ath9k_calculate_iter_data(hw, NULL, &iter_data);
	len += scnprintf(buf + len, sizeof(buf) - len,
			 "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
			 " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
			 iter_data.naps, iter_data.nstations, iter_data.nmeshes,
			 iter_data.nwds, iter_data.nadhocs,
			 sc->nvifs, sc->nbcnvifs);

	if (len > sizeof(buf))
		len = sizeof(buf);

	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	return retval;
}
/*
 * read_file_reset - debugfs read handler for the "reset" file.
 *
 * Prints one line per chip-reset cause counter. Table order matches
 * the original output exactly.
 */
static ssize_t read_file_reset(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	static const struct {
		int type;
		const char *name;
	} causes[] = {
		{ RESET_TYPE_BB_HANG,      "Baseband Hang" },
		{ RESET_TYPE_BB_WATCHDOG,  "Baseband Watchdog" },
		{ RESET_TYPE_FATAL_INT,    "Fatal HW Error" },
		{ RESET_TYPE_TX_ERROR,     "TX HW error" },
		{ RESET_TYPE_TX_HANG,      "TX Path Hang" },
		{ RESET_TYPE_PLL_HANG,     "PLL RX Hang" },
		{ RESET_TYPE_MAC_HANG,     "MAC Hang" },
		{ RESET_TYPE_BEACON_STUCK, "Stuck Beacon" },
		{ RESET_TYPE_MCI,          "MCI Reset" },
	};
	struct ath_softc *sc = file->private_data;
	char buf[512];
	unsigned int len = 0;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(causes); i++)
		len += scnprintf(buf + len, sizeof(buf) - len,
				 "%17s: %2d\n", causes[i].name,
				 sc->debug.stats.reset[causes[i].type]);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/*
 * ath_debug_stat_tx - account one completed TX frame in the debug stats.
 *
 * @sc:    driver state
 * @bf:    buffer that just completed
 * @ts:    hardware TX status for the frame
 * @txq:   queue the frame was transmitted on
 * @flags: ATH_TX_* completion flags
 *
 * Counters are keyed by the hardware queue number (txq->axq_qnum) and
 * later rendered by read_file_xmit().
 */
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
		       struct ath_tx_status *ts, struct ath_txq *txq,
		       unsigned int flags)
{
	int qnum = txq->axq_qnum;

	TX_STAT_INC(qnum, tx_pkts_all);
	sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;

	if (bf_isampdu(bf)) {
		/* A-MPDU path: xretries when the frame ultimately failed. */
		if (flags & ATH_TX_ERROR)
			TX_STAT_INC(qnum, a_xretries);
		else
			TX_STAT_INC(qnum, a_completed);
	} else {
		if (ts->ts_status & ATH9K_TXERR_XRETRY)
			TX_STAT_INC(qnum, xretries);
		else
			TX_STAT_INC(qnum, completed);
	}

	/* Error and underrun conditions reported by the hardware. */
	if (ts->ts_status & ATH9K_TXERR_FILT)
		TX_STAT_INC(qnum, txerr_filtered);
	if (ts->ts_status & ATH9K_TXERR_FIFO)
		TX_STAT_INC(qnum, fifo_underrun);
	if (ts->ts_status & ATH9K_TXERR_XTXOP)
		TX_STAT_INC(qnum, xtxop);
	if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
		TX_STAT_INC(qnum, timer_exp);
	if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
		TX_STAT_INC(qnum, desc_cfg_err);
	if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
		TX_STAT_INC(qnum, data_underrun);
	if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
		TX_STAT_INC(qnum, delim_underrun);
}
/* debugfs file_operations for the "xmit" TX statistics file. */
static const struct file_operations fops_xmit = {
	.read = read_file_xmit,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

/* debugfs file_operations for the "queues" TX queue state file. */
static const struct file_operations fops_queues = {
	.read = read_file_queues,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

/* debugfs file_operations for the "misc" state file. */
static const struct file_operations fops_misc = {
	.read = read_file_misc,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

/* debugfs file_operations for the "reset" cause-counter file. */
static const struct file_operations fops_reset = {
	.read = read_file_reset,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
/* Forward one RX status descriptor to the shared ath9k_cmn RX
 * statistics accounting. */
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
{
	ath9k_cmn_debug_stat_rx(&sc->debug.stats.rxstats, rs);
}
/* Report the currently selected register offset (see write_file_regidx)
 * as a hexadecimal string. */
static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	char buf[32];
	unsigned int len;

	len = scnprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.regidx);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/*
 * write_file_regidx - select the register offset used by the "regval"
 * file.
 *
 * Parses a user-supplied number (any base accepted by kstrtoul) and
 * stores it in sc->debug.regidx. Returns @count on success, -EFAULT on
 * a bad user pointer, or -EINVAL on unparsable input.
 */
static ssize_t write_file_regidx(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	unsigned long regidx;
	char buf[32];
	ssize_t len;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	/* Fix: the address-of operand had been corrupted to a '®'
	 * character ("&reg" mis-encoded); restore &regidx. */
	if (kstrtoul(buf, 0, &regidx))
		return -EINVAL;

	sc->debug.regidx = regidx;
	return count;
}
/* debugfs file_operations for the "regidx" register-offset selector. */
static const struct file_operations fops_regidx = {
	.read = read_file_regidx,
	.write = write_file_regidx,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
/* Read the hardware register selected by the "regidx" file and return
 * its value as a hexadecimal string. Wakes the chip for the access. */
static ssize_t read_file_regval(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	struct ath_hw *ah = sc->sc_ah;
	char buf[32];
	unsigned int len;
	u32 regval;

	ath9k_ps_wakeup(sc);
	regval = REG_READ_D(ah, sc->debug.regidx);
	ath9k_ps_restore(sc);

	len = scnprintf(buf, sizeof(buf), "0x%08x\n", regval);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
/*
 * write_file_regval - write a value to the register selected by the
 * "regidx" file.
 *
 * Parses a user-supplied number and writes it to the hardware while
 * the chip is awake. Returns @count on success, -EFAULT on a bad user
 * pointer, or -EINVAL on unparsable input.
 */
static ssize_t write_file_regval(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	struct ath_hw *ah = sc->sc_ah;
	unsigned long regval;
	char buf[32];
	ssize_t len;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	/* Fix: the address-of operand had been corrupted to a '®'
	 * character ("&reg" mis-encoded); restore &regval. */
	if (kstrtoul(buf, 0, &regval))
		return -EINVAL;

	ath9k_ps_wakeup(sc);
	REG_WRITE_D(ah, sc->debug.regidx, regval);
	ath9k_ps_restore(sc);

	return count;
}
/* debugfs file_operations for the "regval" register access file. */
static const struct file_operations fops_regval = {
	.read = read_file_regval,
	.write = write_file_regval,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
#define REGDUMP_LINE_SIZE 20
/*
 * open_file_regdump - open handler for the "regdump" file.
 *
 * Dumps the chip's entire register space into a vmalloc'd text buffer
 * ("offset value" per line). The buffer is handed to the file as
 * private_data and is freed by ath9k_debugfs_release_buf() on close.
 *
 * Returns 0 on success or -ENOMEM if the buffer cannot be allocated.
 */
static int open_file_regdump(struct inode *inode, struct file *file)
{
	struct ath_softc *sc = inode->i_private;
	unsigned int len = 0;
	u8 *buf;
	unsigned long i;
	unsigned long num_regs, regdump_len, max_reg_offset;

	/* Register-space size depends on the chip generation. */
	max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500;
	num_regs = max_reg_offset / 4 + 1;
	regdump_len = num_regs * REGDUMP_LINE_SIZE + 1;
	buf = vmalloc(regdump_len);
	if (!buf)
		return -ENOMEM;

	/* Keep the chip awake for the whole sweep. The index is
	 * unsigned long to match num_regs (the original used int,
	 * causing a signed/unsigned comparison). */
	ath9k_ps_wakeup(sc);
	for (i = 0; i < num_regs; i++)
		len += scnprintf(buf + len, regdump_len - len,
			"0x%06x 0x%08x\n", (unsigned int)(i << 2),
			REG_READ(sc->sc_ah, i << 2));
	ath9k_ps_restore(sc);

	file->private_data = buf;

	return 0;
}
/* debugfs file_operations for the full register dump; the buffer is
 * built in open() and freed in release(). */
static const struct file_operations fops_regdump = {
	.open = open_file_regdump,
	.read = ath9k_debugfs_read_buf,
	.release = ath9k_debugfs_release_buf,
	.owner = THIS_MODULE,
	.llseek = default_llseek,/* read accesses f_pos */
};
/*
 * read_file_dump_nfcal - debugfs read handler for "dump_nfcal".
 *
 * Prints the channel noise floor and, per active chain, the private
 * noise-floor value together with the raw calibration readings.
 */
static ssize_t read_file_dump_nfcal(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_nfcal_hist *h = sc->caldata.nfCalHist;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &common->hw->conf;
	u32 len = 0, size = 1500;
	u32 i, j;
	ssize_t retval = 0;
	char *buf;
	/* NOTE(review): the upper bits appear to mirror rxchainmask for
	 * the extension-channel readings (skipped below unless HT40) —
	 * confirm against the NUM_NF_READINGS layout. */
	u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
	u8 nread;

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	len += scnprintf(buf + len, size - len,
			 "Channel Noise Floor : %d\n", ah->noise);
	len += scnprintf(buf + len, size - len,
			 "Chain | privNF | # Readings | NF Readings\n");
	for (i = 0; i < NUM_NF_READINGS; i++) {
		/* Skip inactive chains, and extension-channel slots when
		 * not operating in HT40. */
		if (!(chainmask & (1 << i)) ||
		    ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
			continue;

		/* Number of valid readings collected so far. */
		nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount;
		len += scnprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
				 i, h[i].privNF, nread);
		for (j = 0; j < nread; j++)
			len += scnprintf(buf + len, size - len,
					 " %d", h[i].nfCalBuffer[j]);
		len += scnprintf(buf + len, size - len, "\n");
	}

	if (len > size)
		len = size;

	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);

	return retval;
}
/* debugfs file_operations for the noise-floor calibration dump. */
static const struct file_operations fops_dump_nfcal = {
	.read = read_file_dump_nfcal,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
/*
 * read_file_btcoex - debugfs read handler for the "btcoex" file.
 *
 * Reports either a "disabled" notice or the full BT-coex state dump.
 * Returns bytes copied to user space or a negative error code.
 */
static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct ath_softc *sc = file->private_data;
	u32 len = 0, size = 1500;
	char *buf;
	ssize_t retval;

	buf = kzalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	if (!sc->sc_ah->common.btcoex_enabled) {
		len = scnprintf(buf, size, "%s\n",
				"BTCOEX is disabled");
		goto exit;
	}

	len = ath9k_dump_btcoex(sc, buf, size);
exit:
	/* Fix: retval was declared size_t; simple_read_from_buffer()
	 * can return a negative error code, so it must be ssize_t. */
	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return retval;
}
/* debugfs file_operations for the BT-coexistence state file. */
static const struct file_operations fops_btcoex = {
	.read = read_file_btcoex,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
#endif
/* Ethtool support for get-stats */
#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
/* Names for the ethtool statistics, in the exact order the values are
 * filled in by ath9k_get_et_stats(). AMKSTR() expands one name into
 * four per-AC entries (_BE/_BK/_VI/_VO). */
static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
	"tx_pkts_nic",
	"tx_bytes_nic",
	"rx_pkts_nic",
	"rx_bytes_nic",
	AMKSTR(d_tx_pkts),
	AMKSTR(d_tx_bytes),
	AMKSTR(d_tx_mpdus_queued),
	AMKSTR(d_tx_mpdus_completed),
	AMKSTR(d_tx_mpdu_xretries),
	AMKSTR(d_tx_aggregates),
	AMKSTR(d_tx_ampdus_queued_hw),
	AMKSTR(d_tx_ampdus_queued_sw),
	AMKSTR(d_tx_ampdus_completed),
	AMKSTR(d_tx_ampdu_retries),
	AMKSTR(d_tx_ampdu_xretries),
	AMKSTR(d_tx_fifo_underrun),
	AMKSTR(d_tx_op_exceeded),
	AMKSTR(d_tx_timer_expiry),
	AMKSTR(d_tx_desc_cfg_err),
	AMKSTR(d_tx_data_underrun),
	AMKSTR(d_tx_delim_underrun),
	"d_rx_crc_err",
	"d_rx_decrypt_crc_err",
	"d_rx_phy_err",
	"d_rx_mic_err",
	"d_rx_pre_delim_crc_err",
	"d_rx_post_delim_crc_err",
	"d_rx_decrypt_busy_err",
	"d_rx_phyerr_radar",
	"d_rx_phyerr_ofdm_timing",
	"d_rx_phyerr_cck_timing",
};
#define ATH9K_SSTATS_LEN ARRAY_SIZE(ath9k_gstrings_stats)
/* mac80211 ethtool hook: copy out the statistic names for the
 * ETH_SS_STATS string set; other sets are ignored. */
void ath9k_get_et_strings(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif,
			  u32 sset, u8 *data)
{
	if (sset != ETH_SS_STATS)
		return;

	memcpy(data, *ath9k_gstrings_stats, sizeof(ath9k_gstrings_stats));
}
/* mac80211 ethtool hook: number of entries in the ETH_SS_STATS string
 * set; zero for any other set. */
int ath9k_get_et_sset_count(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif, int sset)
{
	return (sset == ETH_SS_STATS) ? ATH9K_SSTATS_LEN : 0;
}
#define AWDATA(elem) \
do { \
data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].elem; \
data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].elem; \
data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].elem; \
data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].elem; \
} while (0)
#define AWDATA_RX(elem) \
do { \
data[i++] = sc->debug.stats.rxstats.elem; \
} while (0)
/*
 * ath9k_get_et_stats - mac80211 ethtool hook: fill the statistics array.
 *
 * The write order here MUST match ath9k_gstrings_stats exactly; the
 * trailing WARN_ON checks that the counts stayed in sync.
 */
void ath9k_get_et_stats(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif,
			struct ethtool_stats *stats, u64 *data)
{
	struct ath_softc *sc = hw->priv;
	int i = 0;

	/* Aggregate TX packet/byte totals across all four ACs. */
	data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_pkts_all +
		     sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_pkts_all +
		     sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_pkts_all +
		     sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_pkts_all);
	data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_bytes_all +
		     sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_bytes_all +
		     sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_bytes_all +
		     sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_bytes_all);
	AWDATA_RX(rx_pkts_all);
	AWDATA_RX(rx_bytes_all);

	/* Per-AC TX counters (AWDATA emits BE, BK, VI, VO in order). */
	AWDATA(tx_pkts_all);
	AWDATA(tx_bytes_all);
	AWDATA(queued);
	AWDATA(completed);
	AWDATA(xretries);
	AWDATA(a_aggr);
	AWDATA(a_queued_hw);
	AWDATA(a_queued_sw);
	AWDATA(a_completed);
	AWDATA(a_retries);
	AWDATA(a_xretries);
	AWDATA(fifo_underrun);
	AWDATA(xtxop);
	AWDATA(timer_exp);
	AWDATA(desc_cfg_err);
	AWDATA(data_underrun);
	AWDATA(delim_underrun);

	/* RX error counters. */
	AWDATA_RX(crc_err);
	AWDATA_RX(decrypt_crc_err);
	AWDATA_RX(phy_err);
	AWDATA_RX(mic_err);
	AWDATA_RX(pre_delim_crc_err);
	AWDATA_RX(post_delim_crc_err);
	AWDATA_RX(decrypt_busy_err);
	AWDATA_RX(phy_err_stats[ATH9K_PHYERR_RADAR]);
	AWDATA_RX(phy_err_stats[ATH9K_PHYERR_OFDM_TIMING]);
	AWDATA_RX(phy_err_stats[ATH9K_PHYERR_CCK_TIMING]);

	WARN_ON(i != ATH9K_SSTATS_LEN);
}
/* Tear down debug state; only the spectral-scan debug files need
 * explicit cleanup here. */
void ath9k_deinit_debug(struct ath_softc *sc)
{
	ath9k_spectral_deinit_debug(sc);
}
/*
 * ath9k_init_debug - create the per-device debugfs hierarchy.
 *
 * Creates the "ath9k" directory under the wiphy's debugfs directory
 * and populates it with all statistics, register-access and tuning
 * files. Returns 0 on success or -ENOMEM if the directory could not
 * be created; individual file creation failures are not checked, as
 * is customary for debugfs.
 */
int ath9k_init_debug(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	sc->debug.debugfs_phy = debugfs_create_dir("ath9k",
						   sc->hw->wiphy->debugfsdir);
	if (!sc->debug.debugfs_phy)
		return -ENOMEM;

#ifdef CONFIG_ATH_DEBUG
	debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
			    sc, &fops_debug);
#endif

	/* Sub-module debug files (DFS, TX99, spectral scan). */
	ath9k_dfs_init_debug(sc);
	ath9k_tx99_init_debug(sc);
	ath9k_spectral_init_debug(sc);

	debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
			    &fops_dma);
	debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc,
			    &fops_interrupt);
	debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, sc,
			    &fops_xmit);
	debugfs_create_file("queues", S_IRUSR, sc->debug.debugfs_phy, sc,
			    &fops_queues);

	/* Writable per-AC queue depth limits. */
	debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
			   &sc->tx.txq_max_pending[IEEE80211_AC_BK]);
	debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
			   &sc->tx.txq_max_pending[IEEE80211_AC_BE]);
	debugfs_create_u32("qlen_vi", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
			   &sc->tx.txq_max_pending[IEEE80211_AC_VI]);
	debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
			   &sc->tx.txq_max_pending[IEEE80211_AC_VO]);

	debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, sc,
			    &fops_misc);
	debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc,
			    &fops_reset);

	/* RX statistics files shared with the common ath9k code. */
	ath9k_cmn_debug_recv(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
	ath9k_cmn_debug_phy_err(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);

	debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
			  &ah->rxchainmask);
	debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
			  &ah->txchainmask);
	debugfs_create_file("ani", S_IRUSR | S_IWUSR,
			    sc->debug.debugfs_phy, sc, &fops_ani);
	debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
			    &sc->sc_ah->config.enable_paprd);

	/* Raw register access: select an offset, then read/write it. */
	debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
			    sc, &fops_regidx);
	debugfs_create_file("regval", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
			    sc, &fops_regval);
	debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR,
			    sc->debug.debugfs_phy,
			    &ah->config.cwm_ignore_extcca);
	debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy, sc,
			    &fops_regdump);
	debugfs_create_file("dump_nfcal", S_IRUSR, sc->debug.debugfs_phy, sc,
			    &fops_dump_nfcal);

	ath9k_cmn_debug_base_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
	ath9k_cmn_debug_modal_eeprom(sc->debug.debugfs_phy, sc->sc_ah);

	debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
			   sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
	debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
			   sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
	debugfs_create_file("antenna_diversity", S_IRUSR,
			    sc->debug.debugfs_phy, sc, &fops_antenna_diversity);
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
	debugfs_create_file("bt_ant_diversity", S_IRUSR | S_IWUSR,
			    sc->debug.debugfs_phy, sc, &fops_bt_ant_diversity);
	debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
			    &fops_btcoex);
#endif
	return 0;
}
| gpl-2.0 |
uniquejainakshay/Linux_Kernel | drivers/net/wireless/rtlwifi/rtl8188ee/sw.c | 319 | 13044 | /******************************************************************************
*
* Copyright(c) 2009-2013 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "../wifi.h"
#include "../core.h"
#include "../pci.h"
#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "dm.h"
#include "hw.h"
#include "sw.h"
#include "trx.h"
#include "led.h"
#include "table.h"
#include <linux/vmalloc.h>
#include <linux/module.h>
/* Initialize the PCIe ASPM policy fields for the RTL8188EE; these are
 * consumed later by the shared rtlwifi PCI code. */
static void rtl88e_init_aspm_vars(struct ieee80211_hw *hw)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	/* Close ASPM for AMD chipsets by default. */
	rtlpci->const_amdpci_aspm = 0;

	/* ASPM PS mode.
	 * 0 - Disable ASPM,
	 * 1 - Enable ASPM without Clock Req,
	 * 2 - Enable ASPM with Clock Req,
	 * 3 - Always Enable ASPM with Clock Req,
	 * 4 - Always Enable ASPM without Clock Req.
	 * set default to RTL8192CE:3 RTL8192E:2
	 */
	rtlpci->const_pci_aspm = 3;

	/* Setting for PCI-E device */
	rtlpci->const_devicepci_aspm_setting = 0x03;

	/* Setting for PCI-E bridge */
	rtlpci->const_hostpci_aspm_setting = 0x02;

	/* In Hw/Sw Radio Off situation.
	 * 0 - Default,
	 * 1 - From ASPM setting without low Mac Pwr,
	 * 2 - From ASPM setting with low Mac Pwr,
	 * 3 - Bus D3
	 * set default to RTL8192CE:0 RTL8192SE:2
	 */
	rtlpci->const_hwsw_rfoff_d3 = 0;

	/* This setting works for those device with
	 * backdoor ASPM setting such as EPHY setting.
	 * 0 - Not support ASPM,
	 * 1 - Support ASPM,
	 * 2 - According to chipset.
	 */
	rtlpci->const_support_pciaspm = 1;
}
/*
 * rtl88e_init_sw_vars - driver-state initialization at probe time.
 *
 * Sets up DM defaults, RX/interrupt configuration, power-save policy,
 * the firmware buffer and the driver timers, then kicks off the
 * asynchronous firmware load. Returns 0 on success, non-zero on
 * failure (matching the existing caller convention).
 *
 * Fix: on request_firmware_nowait() failure the vmalloc'd firmware
 * buffer used to leak; it is now released on that error path.
 */
int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
{
	int err = 0;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 tid;

	rtl8188ee_bt_reg_init(hw);

	rtlpriv->dm.dm_initialgain_enable = 1;
	rtlpriv->dm.dm_flag = 0;
	rtlpriv->dm.disable_framebursting = 0;
	rtlpriv->dm.thermalvalue = 0;
	rtlpci->transmit_config = CFENDFORM | BIT(15);

	/* compatible 5G band 88ce just 2.4G band & smsp */
	rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
	rtlpriv->rtlhal.bandset = BAND_ON_2_4G;
	rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;

	rtlpci->receive_config = (RCR_APPFCS |
				  RCR_APP_MIC |
				  RCR_APP_ICV |
				  RCR_APP_PHYST_RXFF |
				  RCR_HTC_LOC_CTRL |
				  RCR_AMF |
				  RCR_ACF |
				  RCR_ADF |
				  RCR_AICV |
				  RCR_ACRC32 |
				  RCR_AB |
				  RCR_AM |
				  RCR_APM |
				  0);

	rtlpci->irq_mask[0] =
	     (u32) (IMR_PSTIMEOUT |
		    IMR_HSISR_IND_ON_INT |
		    IMR_C2HCMD |
		    IMR_HIGHDOK |
		    IMR_MGNTDOK |
		    IMR_BKDOK |
		    IMR_BEDOK |
		    IMR_VIDOK |
		    IMR_VODOK |
		    IMR_RDU |
		    IMR_ROK |
		    0);
	rtlpci->irq_mask[1] = (u32) (IMR_RXFOVW | 0);
	rtlpci->sys_irq_mask = (u32) (HSIMR_PDN_INT_EN | HSIMR_RON_INT_EN);

	/* for debug level */
	rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
	/* for LPS & IPS */
	rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
	if (!rtlpriv->psc.inactiveps)
		pr_info("rtl8188ee: Power Save off (module option)\n");
	if (!rtlpriv->psc.fwctrl_lps)
		pr_info("rtl8188ee: FW Power Save off (module option)\n");
	rtlpriv->psc.reg_fwctrl_lps = 3;
	rtlpriv->psc.reg_max_lps_awakeintvl = 5;
	/* for ASPM, you can close aspm through
	 * set const_support_pciaspm = 0
	 */
	rtl88e_init_aspm_vars(hw);

	if (rtlpriv->psc.reg_fwctrl_lps == 1)
		rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
	else if (rtlpriv->psc.reg_fwctrl_lps == 2)
		rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
	else if (rtlpriv->psc.reg_fwctrl_lps == 3)
		rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;

	/* for firmware buf */
	rtlpriv->rtlhal.pfirmware = vmalloc(0x8000);
	if (!rtlpriv->rtlhal.pfirmware) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Can't alloc buffer for fw.\n");
		return 1;
	}

	rtlpriv->cfg->fw_name = "rtlwifi/rtl8188efw.bin";
	rtlpriv->max_fw_size = 0x8000;
	pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
	err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
				      rtlpriv->io.dev, GFP_KERNEL, hw,
				      rtl_fw_cb);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Failed to request firmware!\n");
		/* Fix a leak: the firmware buffer allocated above was
		 * not freed when the async request could not be queued. */
		vfree(rtlpriv->rtlhal.pfirmware);
		rtlpriv->rtlhal.pfirmware = NULL;
		return 1;
	}

	/* for early mode */
	rtlpriv->rtlhal.earlymode_enable = false;
	rtlpriv->rtlhal.max_earlymode_num = 10;
	for (tid = 0; tid < 8; tid++)
		skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]);

	/* low power */
	rtlpriv->psc.low_power_enable = false;
	if (rtlpriv->psc.low_power_enable) {
		init_timer(&rtlpriv->works.fw_clockoff_timer);
		setup_timer(&rtlpriv->works.fw_clockoff_timer,
			    rtl88ee_fw_clk_off_timer_callback,
			    (unsigned long)hw);
	}

	init_timer(&rtlpriv->works.fast_antenna_training_timer);
	setup_timer(&rtlpriv->works.fast_antenna_training_timer,
		    rtl88e_dm_fast_antenna_training_callback,
		    (unsigned long)hw);
	return err;
}
/*
 * rtl88e_deinit_sw_vars - undo rtl88e_init_sw_vars() at remove time.
 *
 * Releases the firmware buffer and stops the driver timers.
 */
void rtl88e_deinit_sw_vars(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* vfree(NULL) is a no-op, so the NULL check is unnecessary. */
	vfree(rtlpriv->rtlhal.pfirmware);
	rtlpriv->rtlhal.pfirmware = NULL;

	if (rtlpriv->psc.low_power_enable)
		del_timer_sync(&rtlpriv->works.fw_clockoff_timer);

	del_timer_sync(&rtlpriv->works.fast_antenna_training_timer);
}
/* Hardware abstraction callbacks wired into the shared rtlwifi core. */
static struct rtl_hal_ops rtl8188ee_hal_ops = {
	.init_sw_vars = rtl88e_init_sw_vars,
	.deinit_sw_vars = rtl88e_deinit_sw_vars,
	.read_eeprom_info = rtl88ee_read_eeprom_info,
	.interrupt_recognized = rtl88ee_interrupt_recognized,/*need check*/
	.hw_init = rtl88ee_hw_init,
	.hw_disable = rtl88ee_card_disable,
	.hw_suspend = rtl88ee_suspend,
	.hw_resume = rtl88ee_resume,
	.enable_interrupt = rtl88ee_enable_interrupt,
	.disable_interrupt = rtl88ee_disable_interrupt,
	.set_network_type = rtl88ee_set_network_type,
	.set_chk_bssid = rtl88ee_set_check_bssid,
	.set_qos = rtl88ee_set_qos,
	.set_bcn_reg = rtl88ee_set_beacon_related_registers,
	.set_bcn_intv = rtl88ee_set_beacon_interval,
	.update_interrupt_mask = rtl88ee_update_interrupt_mask,
	.get_hw_reg = rtl88ee_get_hw_reg,
	.set_hw_reg = rtl88ee_set_hw_reg,
	.update_rate_tbl = rtl88ee_update_hal_rate_tbl,
	.fill_tx_desc = rtl88ee_tx_fill_desc,
	.fill_tx_cmddesc = rtl88ee_tx_fill_cmddesc,
	.query_rx_desc = rtl88ee_rx_query_desc,
	.set_channel_access = rtl88ee_update_channel_access_setting,
	.radio_onoff_checking = rtl88ee_gpio_radio_on_off_checking,
	.set_bw_mode = rtl88e_phy_set_bw_mode,
	.switch_channel = rtl88e_phy_sw_chnl,
	.dm_watchdog = rtl88e_dm_watchdog,
	.scan_operation_backup = rtl_phy_scan_operation_backup,
	.set_rf_power_state = rtl88e_phy_set_rf_power_state,
	.led_control = rtl88ee_led_control,
	.set_desc = rtl88ee_set_desc,
	.get_desc = rtl88ee_get_desc,
	.tx_polling = rtl88ee_tx_polling,
	.enable_hw_sec = rtl88ee_enable_hw_security_config,
	.set_key = rtl88ee_set_key,
	.init_sw_leds = rtl88ee_init_sw_leds,
	.allow_all_destaddr = rtl88ee_allow_all_destaddr,
	.get_bbreg = rtl88e_phy_query_bb_reg,
	.set_bbreg = rtl88e_phy_set_bb_reg,
	.get_rfreg = rtl88e_phy_query_rf_reg,
	.set_rfreg = rtl88e_phy_set_rf_reg,
};
/* Module parameter defaults; overridable at load time via the
 * module_param_named() declarations below. */
static struct rtl_mod_params rtl88ee_mod_params = {
	.sw_crypto = false,
	.inactiveps = true,
	.swctrl_lps = false,
	.fwctrl_lps = true,
	.debug = DBG_EMERG,
};
/* Chip description handed to the shared rtlwifi PCI core: ops,
 * defaults, and the register/bit maps that translate generic rtlwifi
 * identifiers to RTL8188EE-specific values. */
static struct rtl_hal_cfg rtl88ee_hal_cfg = {
	.bar_id = 2,
	.write_readback = true,
	.name = "rtl88e_pci",
	.ops = &rtl8188ee_hal_ops,
	.mod_params = &rtl88ee_mod_params,

	/* System / EFUSE register map. */
	.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
	.maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
	.maps[SYS_CLK] = REG_SYS_CLKR,
	.maps[MAC_RCR_AM] = AM,
	.maps[MAC_RCR_AB] = AB,
	.maps[MAC_RCR_ACRC32] = ACRC32,
	.maps[MAC_RCR_ACF] = ACF,
	.maps[MAC_RCR_AAP] = AAP,
	.maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS,
	.maps[EFUSE_TEST] = REG_EFUSE_TEST,
	.maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
	.maps[EFUSE_CLK] = 0,
	.maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
	.maps[EFUSE_PWC_EV12V] = PWC_EV12V,
	.maps[EFUSE_FEN_ELDR] = FEN_ELDR,
	.maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
	.maps[EFUSE_ANA8M] = ANA8M,
	.maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
	.maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
	.maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
	.maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,

	/* Security CAM registers and cipher identifiers. */
	.maps[RWCAM] = REG_CAMCMD,
	.maps[WCAMI] = REG_CAMWRITE,
	.maps[RCAMO] = REG_CAMREAD,
	.maps[CAMDBG] = REG_CAMDBG,
	.maps[SECR] = REG_SECCFG,
	.maps[SEC_CAM_NONE] = CAM_NONE,
	.maps[SEC_CAM_WEP40] = CAM_WEP40,
	.maps[SEC_CAM_TKIP] = CAM_TKIP,
	.maps[SEC_CAM_AES] = CAM_AES,
	.maps[SEC_CAM_WEP104] = CAM_WEP104,

	/* Interrupt mask bit translation. */
	.maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
	.maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
	.maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
	.maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
	.maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
	.maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
/*	.maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8, */   /*need check*/
	.maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
	.maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
	.maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
	.maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
	.maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
	.maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
	.maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
/*	.maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,*/
/*	.maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,*/

	.maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
	.maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
	.maps[RTL_IMR_BCNINT] = IMR_BCNDMAINT0,
	.maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
	.maps[RTL_IMR_RDU] = IMR_RDU,
	.maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
	.maps[RTL_IMR_BDOK] = IMR_BCNDOK0,
	.maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
	.maps[RTL_IMR_TBDER] = IMR_TBDER,
	.maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
	.maps[RTL_IMR_TBDOK] = IMR_TBDOK,
	.maps[RTL_IMR_BKDOK] = IMR_BKDOK,
	.maps[RTL_IMR_BEDOK] = IMR_BEDOK,
	.maps[RTL_IMR_VIDOK] = IMR_VIDOK,
	.maps[RTL_IMR_VODOK] = IMR_VODOK,
	.maps[RTL_IMR_ROK] = IMR_ROK,
	.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER),

	/* Rate descriptor translation (CCK/OFDM/HT). */
	.maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
	.maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
	.maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
	.maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
	.maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
	.maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
	.maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
	.maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
	.maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
	.maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
	.maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
	.maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
	.maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
	.maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
};
static DEFINE_PCI_DEVICE_TABLE(rtl88ee_pci_ids) = {
{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8179, rtl88ee_hal_cfg)},
{},
};
MODULE_DEVICE_TABLE(pci, rtl88ee_pci_ids);
MODULE_AUTHOR("zhiyuan_yang <zhiyuan_yang@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 8188E 802.11n PCI wireless");
MODULE_FIRMWARE("rtlwifi/rtl8188efw.bin");
module_param_named(swenc, rtl88ee_mod_params.sw_crypto, bool, 0444);
module_param_named(debug, rtl88ee_mod_params.debug, int, 0444);
module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
/* Suspend/resume is handled by the shared rtlwifi PCI core */
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);

/*
 * All probe/remove work is delegated to the common rtl_pci layer;
 * this driver only supplies the ID table and HAL configuration.
 */
static struct pci_driver rtl88ee_driver = {
	.name = KBUILD_MODNAME,
	.id_table = rtl88ee_pci_ids,
	.probe = rtl_pci_probe,
	.remove = rtl_pci_disconnect,
	.driver.pm = &rtlwifi_pm_ops,
};

/* Generates module init/exit that register/unregister the pci_driver */
module_pci_driver(rtl88ee_driver);
| gpl-2.0 |
jallen93/linux-vnic-dbg | net/bridge/netfilter/nf_log_bridge.c | 319 | 2167 | /*
* (C) 2014 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_bridge.h>
#include <linux/ip.h>
#include <net/route.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_log.h>
/*
 * Log one bridged frame.  Delegates to the layer-2 aware logger,
 * passing the encapsulated protocol taken from the Ethernet header so
 * the correct per-protocol formatter is selected.
 */
static void nf_log_bridge_packet(struct net *net, u_int8_t pf,
				 unsigned int hooknum,
				 const struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 const struct nf_loginfo *loginfo,
				 const char *prefix)
{
	nf_log_l2packet(net, pf, eth_hdr(skb)->h_proto, hooknum, skb,
			in, out, loginfo, prefix);
}
/* "log" type backend for the bridge family; formatting is done by
 * the per-protocol loggers reached through nf_log_bridge_packet(). */
static struct nf_logger nf_bridge_logger __read_mostly = {
	.name = "nf_log_bridge",
	.type = NF_LOG_TYPE_LOG,
	.logfn = nf_log_bridge_packet,
	.me = THIS_MODULE,
};
/* Per-netns setup: make this logger the active NFPROTO_BRIDGE logger */
static int __net_init nf_log_bridge_net_init(struct net *net)
{
	return nf_log_set(net, NFPROTO_BRIDGE, &nf_bridge_logger);
}
/* Per-netns teardown: drop this logger from the namespace's active set */
static void __net_exit nf_log_bridge_net_exit(struct net *net)
{
	nf_log_unset(net, &nf_bridge_logger);
}
/* Hook init/exit into network-namespace creation and destruction */
static struct pernet_operations nf_log_bridge_net_ops = {
	.init = nf_log_bridge_net_init,
	.exit = nf_log_bridge_net_exit,
};
/*
 * Module init: pull in the real packet loggers, register the pernet
 * hooks, then register the bridge logger itself.
 *
 * Fix: the return value of nf_log_register() was ignored.  It can fail
 * (e.g. -EEXIST when another LOG-type bridge logger is already
 * registered), which previously left the pernet subsys registered for a
 * logger that never came into existence.  Check it and unwind.
 */
static int __init nf_log_bridge_init(void)
{
	int ret;

	/* Request to load the real packet loggers. */
	nf_logger_request_module(NFPROTO_IPV4, NF_LOG_TYPE_LOG);
	nf_logger_request_module(NFPROTO_IPV6, NF_LOG_TYPE_LOG);
	nf_logger_request_module(NFPROTO_ARP, NF_LOG_TYPE_LOG);

	ret = register_pernet_subsys(&nf_log_bridge_net_ops);
	if (ret < 0)
		return ret;

	ret = nf_log_register(NFPROTO_BRIDGE, &nf_bridge_logger);
	if (ret < 0)
		unregister_pernet_subsys(&nf_log_bridge_net_ops);
	return ret;
}
/* Module exit: remove pernet hooks, then the global logger registration */
static void __exit nf_log_bridge_exit(void)
{
	unregister_pernet_subsys(&nf_log_bridge_net_ops);
	nf_log_unregister(&nf_bridge_logger);
}
module_init(nf_log_bridge_init);
module_exit(nf_log_bridge_exit);

MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter bridge packet logging");
MODULE_LICENSE("GPL");
/* Allows autoload when a bridge LOG target (type 0) is requested */
MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 0);
| gpl-2.0 |
playfulgod/msm-3.0 | kernel/time/tick-broadcast.c | 575 | 15883 | /*
* linux/kernel/time/tick-broadcast.c
*
* This file contains functions which emulate a local clock-event
* device via a broadcast event source.
*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
*
* This code is licenced under the GPL version 2. For details see
* kernel-base/COPYING.
*/
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include "tick-internal.h"
/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */
static struct tick_device tick_broadcast_device;
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
/* Scratch mask, only ever touched under tick_broadcast_lock */
static DECLARE_BITMAP(tmpmask, NR_CPUS);
/* Serializes the broadcast device, both masks and the force flag */
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
/* Set when broadcast mode was forced on via BROADCAST_FORCE */
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif
/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}
/* Accessor for the mask of CPUs served by the periodic broadcast */
struct cpumask *tick_get_broadcast_mask(void)
{
	return to_cpumask(tick_broadcast_mask);
}
/*
 * Start the device in periodic mode.  A NULL @bc means no broadcast
 * device has been installed yet; nothing to do in that case.
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (!bc)
		return;

	tick_setup_periodic(bc, 1);
}
/*
 * Check, if the device can be utilized as broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	/*
	 * Reject the candidate if we already have an equal or better
	 * rated broadcast device, or if the candidate itself stops in
	 * deep C-states (it could not drive the broadcast then).
	 */
	if ((tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	     (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(NULL, dev);
	tick_broadcast_device.evtdev = dev;
	/* Start immediately if CPUs are already waiting for broadcasts */
	if (!cpumask_empty(tick_get_broadcast_mask()))
		tick_broadcast_start_periodic(dev);
	return 1;
}
/*
 * Check, if the device is the broadcast device.  Returns 1 when @dev is
 * non-NULL and is the currently installed broadcast device, 0 otherwise.
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	if (!dev)
		return 0;

	return tick_broadcast_device.evtdev == dev;
}
/*
 * Check, if the device is disfunctional and a place holder, which
 * needs to be handled by the broadcast device.
 *
 * Returns 1 when @dev is a dummy that must be serviced by broadcasts,
 * 0 when it can tick on its own.
 *
 * Fix: the CLOCK_EVT_FEAT_C3STOP branch declared a local
 * "int cpu = smp_processor_id();" that shadowed the @cpu parameter
 * (-Wshadow).  This function runs on the CPU that registered the
 * device, so the parameter is used directly instead.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpumask_set_cpu(cpu, tick_get_broadcast_mask());
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			tick_broadcast_clear_oneshot(cpu);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 * Called with tick_broadcast_lock held; @mask is consumed/modified.
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		/* Handle ourselves directly; no IPI needed for this cpu */
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}
/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 *
 * Restricts the broadcast to currently-online CPUs via tmpmask so that
 * the persistent broadcast mask is left untouched.
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);
	cpumask_and(to_cpumask(tmpmask),
		    cpu_online_mask, tick_get_broadcast_mask());
	tick_do_broadcast(to_cpumask(tmpmask));
	raw_spin_unlock(&tick_broadcast_lock);
}
/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);
		/* Success (0) means the event was armed in the future */
		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		/* Already expired: deliver the tick and try the next period */
		tick_do_periodic_broadcast();
	}
}
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	/* Dummy devices are always broadcast driven; nothing to switch */
	if (!tick_device_is_functional(dev))
		goto out;

	/* Remember whether the broadcast device was idle before the change */
	bc_stopped = cpumask_empty(tick_get_broadcast_mask());

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
			cpumask_set_cpu(cpu, tick_get_broadcast_mask());
			/*
			 * In periodic mode the broadcast device now
			 * ticks for us, so shut the local device down.
			 */
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		/* A prior FORCE request overrides OFF */
		if (!tick_broadcast_force &&
		    cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_get_broadcast_mask())) {
		/* Last user gone: stop the broadcast device */
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		/* First user appeared: (re)start the broadcast device */
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 *
 * Requests for offline CPUs are rejected with a log message; otherwise
 * the request is handed to tick_do_broadcast_on_off().
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask)) {
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
		return;
	}

	tick_do_broadcast_on_off(&reason);
}
/*
 * Set the periodic handler depending on broadcast on/off.  A device in
 * broadcast mode gets the broadcast-distributing handler, otherwise the
 * plain periodic tick handler.
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	dev->event_handler = broadcast ? tick_handle_periodic_broadcast
				       : tick_handle_periodic;
}
/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_get_broadcast_mask());

	/* Stop the broadcast device when the last periodic user is gone */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_get_broadcast_mask()))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/* Shut the broadcast device down across a system suspend */
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
 * Resume the broadcast device after suspend.  Returns non-zero when the
 * calling CPU is itself broadcast driven, so the caller knows its local
 * device must not be restarted.
 */
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			/* Restart only if somebody still needs broadcasts */
			if (!cpumask_empty(tick_get_broadcast_mask()))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_get_broadcast_mask());
			break;
		case TICKDEV_MODE_ONESHOT:
			broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}
#ifdef CONFIG_TICK_ONESHOT

/* CPUs currently in deep idle whose next event is broadcast driven */
/* FIXME: use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return to_cpumask(tick_broadcast_oneshot_mask);
}
/*
 * Program the broadcast device for the next expiry.  Returns the result
 * of tick_dev_program_event() (non-zero when programming failed).
 */
static int tick_broadcast_set_event(ktime_t expires, int force)
{
	return tick_dev_program_event(tick_broadcast_device.evtdev,
				      expires, force);
}
/* Put the broadcast device back into oneshot mode after resume */
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}
/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}
/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(to_cpumask(tmpmask));
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64)
			cpumask_set_cpu(cpu, to_cpumask(tmpmask));
		else if (td->evtdev->next_event.tv64 < next_event.tv64)
			/* Track the earliest still-pending expiry */
			next_event.tv64 = td->evtdev->next_event.tv64;
	}

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(to_cpumask(tmpmask));

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemtion disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	/* Devices which keep ticking in deep idle need no broadcast */
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		/* Entering idle: hand our next event over to the broadcast */
		if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
			cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			/* Pull the broadcast in if we expire earlier */
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(dev->next_event, 1);
		}
	} else {
		/* Leaving idle: reclaim our event from the broadcast */
		if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
			cpumask_clear_cpu(cpu,
					  tick_get_broadcast_oneshot_mask());
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 != KTIME_MAX)
				tick_program_event(dev->next_event, 1);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}
/*
 * Seed next_event of every per-cpu device in @mask with @expires so
 * the oneshot broadcast handler sees a sane expiry for each of them.
 */
static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}
/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

		/* Take the do_timer update */
		tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
		/* The current cpu handles its own tick; exclude it */
		cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
		cpumask_or(tick_get_broadcast_oneshot_mask(),
			   tick_get_broadcast_oneshot_mask(),
			   to_cpumask(tmpmask));

		if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
			/* Give the waiters a first expiry at the next period */
			tick_broadcast_init_next_event(to_cpumask(tmpmask),
						       tick_next_period);
			tick_broadcast_set_event(tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}
/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}
/*
 * Check whether the broadcast device supports oneshot.
 * False when no broadcast device is installed at all.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}
#endif
| gpl-2.0 |
hellprototypes/mini2440_linux-2.6.32.2 | drivers/usb/host/ehci-fsl.c | 575 | 9460 | /*
* Copyright (c) 2005 MontaVista Software
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided
* by Hunter Wu.
*/
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include "ehci-fsl.h"
/* FIXME: Power Management is un-ported so temporarily disable it */
#undef CONFIG_PM
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */

/**
 * usb_hcd_fsl_probe - initialize FSL-based HCDs
 * @drvier: Driver to be used for this HCD
 * @pdev: USB Host Controller being probed
 * Context: !in_interrupt()
 *
 * Allocates basic resources for this USB host controller.
 *
 * Resources are released in reverse order via the err1..err4 labels,
 * so each failure point unwinds only what was acquired before it.
 */
int usb_hcd_fsl_probe(const struct hc_driver *driver,
		      struct platform_device *pdev)
{
	struct fsl_usb2_platform_data *pdata;
	struct usb_hcd *hcd;
	struct resource *res;
	int irq;
	int retval;
	unsigned int temp;

	pr_debug("initializing FSL-SOC USB Controller\n");

	/* Need platform data for setup */
	pdata = (struct fsl_usb2_platform_data *)pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev,
			"No platform data for %s.\n", dev_name(&pdev->dev));
		return -ENODEV;
	}

	/*
	 * This is a host mode driver, verify that we're supposed to be
	 * in host mode.
	 */
	if (!((pdata->operating_mode == FSL_USB2_DR_HOST) ||
	      (pdata->operating_mode == FSL_USB2_MPH_HOST) ||
	      (pdata->operating_mode == FSL_USB2_DR_OTG))) {
		dev_err(&pdev->dev,
			"Non Host Mode configured for %s. Wrong driver linked.\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"Found HC with no IRQ. Check %s setup!\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}
	irq = res->start;

	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		retval = -ENOMEM;
		goto err1;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"Found HC with no register addr. Check %s setup!\n",
			dev_name(&pdev->dev));
		retval = -ENODEV;
		goto err2;
	}
	hcd->rsrc_start = res->start;
	hcd->rsrc_len = res->end - res->start + 1;
	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
				driver->description)) {
		dev_dbg(&pdev->dev, "controller already in use\n");
		retval = -EBUSY;
		goto err2;
	}
	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);

	if (hcd->regs == NULL) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		retval = -EFAULT;
		goto err3;
	}

	/* Enable USB controller */
	/* NOTE(review): 0x500 presumably is the non-EHCI control register
	 * block (matches FSL_SOC_USB_CTRL in mpc83xx_usb_setup) — confirm */
	temp = in_be32(hcd->regs + 0x500);
	out_be32(hcd->regs + 0x500, temp | 0x4);

	/* Set to Host mode */
	/* NOTE(review): 0x1a8 looks like the USBMODE register — confirm */
	temp = in_le32(hcd->regs + 0x1a8);
	out_le32(hcd->regs + 0x1a8, temp | 0x3);

	retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
	if (retval != 0)
		goto err4;
	return retval;

      err4:
	iounmap(hcd->regs);
      err3:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
      err2:
	usb_put_hcd(hcd);
      err1:
	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
	return retval;
}
/* may be called without controller electrically present */
/* may be called with controller, bus, and devices active */

/**
 * usb_hcd_fsl_remove - shutdown processing for FSL-based HCDs
 * @dev: USB Host Controller being removed
 * Context: !in_interrupt()
 *
 * Reverses the effect of usb_hcd_fsl_probe().
 *
 */
void usb_hcd_fsl_remove(struct usb_hcd *hcd, struct platform_device *pdev)
{
	usb_remove_hcd(hcd);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
}
/*
 * Program the PHY interface type (ULPI/serial/UTMI) into the PORTSC
 * register of the given root-hub port.
 */
static void mpc83xx_setup_phy(struct ehci_hcd *ehci,
			      enum fsl_usb2_phy_modes phy_mode,
			      unsigned int port_offset)
{
	u32 portsc = 0;

	switch (phy_mode) {
	case FSL_USB2_PHY_ULPI:
		portsc |= PORT_PTS_ULPI;
		break;
	case FSL_USB2_PHY_SERIAL:
		portsc |= PORT_PTS_SERIAL;
		break;
	case FSL_USB2_PHY_UTMI_WIDE:
		portsc |= PORT_PTS_PTW;
		/* fall through */
	case FSL_USB2_PHY_UTMI:
		portsc |= PORT_PTS_UTMI;
		break;
	case FSL_USB2_PHY_NONE:
		break;
	}
	ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
}
/*
 * SoC-specific controller bring-up: enable the PHY interface, configure
 * snooping, program the port PHYs and force host mode.
 */
static void mpc83xx_usb_setup(struct usb_hcd *hcd)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	struct fsl_usb2_platform_data *pdata;
	void __iomem *non_ehci = hcd->regs;
	u32 temp;

	pdata =
	    (struct fsl_usb2_platform_data *)hcd->self.controller->
	    platform_data;
	/* Enable PHY interface in the control reg. */
	temp = in_be32(non_ehci + FSL_SOC_USB_CTRL);
	out_be32(non_ehci + FSL_SOC_USB_CTRL, temp | 0x00000004);
	out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0000001b);

#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	/*
	 * Turn on cache snooping hardware, since some PowerPC platforms
	 * wholly rely on hardware to deal with cache coherent
	 */

	/* Setup Snooping for all the 4GB space */
	/* SNOOP1 starts from 0x0, size 2G */
	out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0 | SNOOP_SIZE_2GB);
	/* SNOOP2 starts from 0x80000000, size 2G */
	out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB);
#endif

	/* Dual-role controllers have a single port PHY at offset 0 */
	if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
	    (pdata->operating_mode == FSL_USB2_DR_OTG))
		mpc83xx_setup_phy(ehci, pdata->phy_mode, 0);

	if (pdata->operating_mode == FSL_USB2_MPH_HOST) {
		unsigned int chip, rev, svr;

		svr = mfspr(SPRN_SVR);
		chip = svr >> 16;
		rev = (svr >> 4) & 0xf;

		/* Deal with USB Erratum #14 on MPC834x Rev 1.0 & 1.1 chips */
		if ((rev == 1) && (chip >= 0x8050) && (chip <= 0x8055))
			ehci->has_fsl_port_bug = 1;

		if (pdata->port_enables & FSL_USB2_PORT0_ENABLED)
			mpc83xx_setup_phy(ehci, pdata->phy_mode, 0);
		if (pdata->port_enables & FSL_USB2_PORT1_ENABLED)
			mpc83xx_setup_phy(ehci, pdata->phy_mode, 1);
	}

	/* put controller in host mode. */
	ehci_writel(ehci, 0x00000003, non_ehci + FSL_SOC_USB_USBMODE);
	/* Bus priority / aging thresholds differ between 85xx and 83xx */
#ifdef CONFIG_PPC_85xx
	out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x00000008);
	out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000080);
#else
	out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x0000000c);
	out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000040);
#endif
	out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001);
}
/* called after powerup, by probe or system-pm "wakeup" */
static int ehci_fsl_reinit(struct ehci_hcd *ehci)
{
	mpc83xx_usb_setup(ehci_to_hcd(ehci));
	ehci_port_power(ehci, 0);

	return 0;
}
/* called during probe() after chip reset completes */
static int ehci_fsl_setup(struct usb_hcd *hcd)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	int retval;

	/* EHCI registers start at offset 0x100 */
	ehci->caps = hcd->regs + 0x100;
	ehci->regs = hcd->regs + 0x100 +
	    HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

	retval = ehci_halt(ehci);
	if (retval)
		return retval;

	/* data structure init */
	retval = ehci_init(hcd);
	if (retval)
		return retval;

	/* Embedded transaction translator for FS/LS devices */
	hcd->has_tt = 1;

	/* USB spec release number 2.0 */
	ehci->sbrn = 0x20;

	ehci_reset(ehci);

	retval = ehci_fsl_reinit(ehci);
	return retval;
}
/* HC driver ops: everything except reset() is the generic EHCI code */
static const struct hc_driver ehci_fsl_hc_driver = {
	.description = hcd_name,
	.product_desc = "Freescale On-Chip EHCI Host Controller",
	.hcd_priv_size = sizeof(struct ehci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq = ehci_irq,
	.flags = HCD_USB2,

	/*
	 * basic lifecycle operations
	 */
	.reset = ehci_fsl_setup,
	.start = ehci_run,
	.stop = ehci_stop,
	.shutdown = ehci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue = ehci_urb_enqueue,
	.urb_dequeue = ehci_urb_dequeue,
	.endpoint_disable = ehci_endpoint_disable,
	.endpoint_reset = ehci_endpoint_reset,

	/*
	 * scheduling support
	 */
	.get_frame_number = ehci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data = ehci_hub_status_data,
	.hub_control = ehci_hub_control,
	.bus_suspend = ehci_bus_suspend,
	.bus_resume = ehci_bus_resume,
	.relinquish_port = ehci_relinquish_port,
	.port_handed_over = ehci_port_handed_over,

	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
/* Platform-bus probe: thin wrapper around the shared FSL HCD probe */
static int ehci_fsl_drv_probe(struct platform_device *pdev)
{
	if (usb_disabled())
		return -ENODEV;

	/* FIXME we only want one one probe() not two */
	return usb_hcd_fsl_probe(&ehci_fsl_hc_driver, pdev);
}
/* Platform-bus remove: thin wrapper around the shared FSL HCD remove */
static int ehci_fsl_drv_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);

	/* FIXME we only want one one remove() not two */
	usb_hcd_fsl_remove(hcd, pdev);

	return 0;
}
/* Allows autoload when the "fsl-ehci" platform device is registered */
MODULE_ALIAS("platform:fsl-ehci");

static struct platform_driver ehci_fsl_driver = {
	.probe = ehci_fsl_drv_probe,
	.remove = ehci_fsl_drv_remove,
	.shutdown = usb_hcd_platform_shutdown,
	.driver = {
		   .name = "fsl-ehci",
		   },
};
| gpl-2.0 |
fanzhidongyzby/linux | arch/arm/mach-shmobile/clock-r8a7778.c | 831 | 13280 | /*
* r8a7778 clock framework support
*
* Copyright (C) 2013 Renesas Solutions Corp.
* Copyright (C) 2013 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
* based on r8a7779
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2011 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* MD MD MD MD PLLA PLLB EXTAL clki clkz
* 19 18 12 11 (HMz) (MHz) (MHz)
*----------------------------------------------------------------------------
* 1 0 0 0 x21 x21 38.00 800 800
* 1 0 0 1 x24 x24 33.33 800 800
* 1 0 1 0 x28 x28 28.50 800 800
* 1 0 1 1 x32 x32 25.00 800 800
* 1 1 0 1 x24 x21 33.33 800 700
* 1 1 1 0 x28 x21 28.50 800 600
* 1 1 1 1 x32 x24 25.00 800 600
*/
#include <linux/io.h>
#include <linux/sh_clk.h>
#include <linux/clkdev.h>
#include "clock.h"
#include "common.h"
/* Module-stop control/status register addresses in the CPG block */
#define MSTPCR0		IOMEM(0xffc80030)
#define MSTPCR1		IOMEM(0xffc80034)
#define MSTPCR3		IOMEM(0xffc8003c)
#define MSTPSR1		IOMEM(0xffc80044)
#define MSTPSR4		IOMEM(0xffc80048)
#define MSTPSR6		IOMEM(0xffc8004c)
#define MSTPCR4		IOMEM(0xffc80050)
#define MSTPCR5		IOMEM(0xffc80054)
#define MSTPCR6		IOMEM(0xffc80058)

/* Mode monitor register; MD() selects one mode pin bit */
#define MODEMR		0xFFCC0020
#define MD(nr)		BIT(nr)

/* ioremap() through clock mapping mandatory to avoid
 * collision with ARM coherent DMA virtual memory range.
 */
static struct clk_mapping cpg_mapping = {
	.phys	= 0xffc80000,
	.len	= 0x80,
};

static struct clk extal_clk = {
	/* .rate will be updated on r8a7778_clock_init() */
	.mapping = &cpg_mapping,
};

/* Board code fills in the audio clock rates at init time */
static struct clk audio_clk_a = {
};

static struct clk audio_clk_b = {
};

static struct clk audio_clk_c = {
};
/*
 * clock ratio of these clock will be updated
 * on r8a7778_clock_init()
 */
SH_FIXED_RATIO_CLK_SET(plla_clk,	extal_clk, 1, 1);
SH_FIXED_RATIO_CLK_SET(pllb_clk,	extal_clk, 1, 1);
SH_FIXED_RATIO_CLK_SET(i_clk,		plla_clk,  1, 1);
SH_FIXED_RATIO_CLK_SET(s_clk,		plla_clk,  1, 1);
SH_FIXED_RATIO_CLK_SET(s1_clk,		plla_clk,  1, 1);
SH_FIXED_RATIO_CLK_SET(s3_clk,		plla_clk,  1, 1);
SH_FIXED_RATIO_CLK_SET(s4_clk,		plla_clk,  1, 1);
SH_FIXED_RATIO_CLK_SET(b_clk,		plla_clk,  1, 1);
SH_FIXED_RATIO_CLK_SET(out_clk,		plla_clk,  1, 1);
SH_FIXED_RATIO_CLK_SET(p_clk,		plla_clk,  1, 1);
SH_FIXED_RATIO_CLK_SET(g_clk,		plla_clk,  1, 1);
SH_FIXED_RATIO_CLK_SET(z_clk,		pllb_clk,  1, 1);

/* All root/derived clocks registered with the SH clock framework */
static struct clk *main_clks[] = {
	&extal_clk,
	&plla_clk,
	&pllb_clk,
	&i_clk,
	&s_clk,
	&s1_clk,
	&s3_clk,
	&s4_clk,
	&b_clk,
	&out_clk,
	&p_clk,
	&g_clk,
	&z_clk,
	&audio_clk_a,
	&audio_clk_b,
	&audio_clk_c,
};
/* Indices into mstp_clks[]; MSTPxyz = bit z of MSTPCRx */
enum {
	MSTP531, MSTP530,
	MSTP529, MSTP528, MSTP527, MSTP526, MSTP525, MSTP524, MSTP523,
	MSTP331,
	MSTP323, MSTP322, MSTP321,
	MSTP311, MSTP310,
	MSTP309, MSTP308, MSTP307,
	MSTP114,
	MSTP110, MSTP109,
	MSTP100,
	MSTP030,
	MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024, MSTP023, MSTP022, MSTP021,
	MSTP016, MSTP015, MSTP012, MSTP011, MSTP010,
	MSTP009, MSTP008, MSTP007,
	MSTP_NR };
/* Module-stop gate clocks: (parent, MSTPCR register, bit, flags) */
static struct clk mstp_clks[MSTP_NR] = {
	[MSTP531] = SH_CLK_MSTP32(&p_clk, MSTPCR5, 31, 0), /* SCU0 */
	[MSTP530] = SH_CLK_MSTP32(&p_clk, MSTPCR5, 30, 0), /* SCU1 */
	[MSTP529] = SH_CLK_MSTP32(&p_clk, MSTPCR5, 29, 0), /* SCU2 */
	[MSTP528] = SH_CLK_MSTP32(&p_clk, MSTPCR5, 28, 0), /* SCU3 */
	[MSTP527] = SH_CLK_MSTP32(&p_clk, MSTPCR5, 27, 0), /* SCU4 */
	[MSTP526] = SH_CLK_MSTP32(&p_clk, MSTPCR5, 26, 0), /* SCU5 */
	[MSTP525] = SH_CLK_MSTP32(&p_clk, MSTPCR5, 25, 0), /* SCU6 */
	[MSTP524] = SH_CLK_MSTP32(&p_clk, MSTPCR5, 24, 0), /* SCU7 */
	[MSTP523] = SH_CLK_MSTP32(&p_clk, MSTPCR5, 23, 0), /* SCU8 */
	[MSTP331] = SH_CLK_MSTP32(&s4_clk, MSTPCR3, 31, 0), /* MMC */
	[MSTP323] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 23, 0), /* SDHI0 */
	[MSTP322] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 22, 0), /* SDHI1 */
	[MSTP321] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 21, 0), /* SDHI2 */
	[MSTP311] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 11, 0), /* SSI4 */
	[MSTP310] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 10, 0), /* SSI5 */
	[MSTP309] = SH_CLK_MSTP32(&p_clk, MSTPCR3,  9, 0), /* SSI6 */
	[MSTP308] = SH_CLK_MSTP32(&p_clk, MSTPCR3,  8, 0), /* SSI7 */
	[MSTP307] = SH_CLK_MSTP32(&p_clk, MSTPCR3,  7, 0), /* SSI8 */
	[MSTP114] = SH_CLK_MSTP32(&p_clk, MSTPCR1, 14, 0), /* Ether */
	[MSTP110] = SH_CLK_MSTP32(&s_clk, MSTPCR1, 10, 0), /* VIN0 */
	[MSTP109] = SH_CLK_MSTP32(&s_clk, MSTPCR1,  9, 0), /* VIN1 */
	[MSTP100] = SH_CLK_MSTP32(&p_clk, MSTPCR1,  0, 0), /* USB0/1 */
	[MSTP030] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 30, 0), /* I2C0 */
	[MSTP029] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 29, 0), /* I2C1 */
	[MSTP028] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 28, 0), /* I2C2 */
	[MSTP027] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 27, 0), /* I2C3 */
	[MSTP026] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 26, 0), /* SCIF0 */
	[MSTP025] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 25, 0), /* SCIF1 */
	[MSTP024] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 24, 0), /* SCIF2 */
	[MSTP023] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 23, 0), /* SCIF3 */
	[MSTP022] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 22, 0), /* SCIF4 */
	[MSTP021] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 21, 0), /* SCIF5 */
	[MSTP016] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 16, 0), /* TMU0 */
	[MSTP015] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 15, 0), /* TMU1 */
	[MSTP012] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 12, 0), /* SSI0 */
	[MSTP011] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 11, 0), /* SSI1 */
	[MSTP010] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 10, 0), /* SSI2 */
	[MSTP009] = SH_CLK_MSTP32(&p_clk, MSTPCR0,  9, 0), /* SSI3 */
	[MSTP008] = SH_CLK_MSTP32(&p_clk, MSTPCR0,  8, 0), /* SRU */
	[MSTP007] = SH_CLK_MSTP32(&s_clk, MSTPCR0,  7, 0), /* HSPI */
};
/*
 * Clock lookup table: maps console/device-name pairs (both platform-device
 * and DT-address style names) to the clocks registered above, for use by
 * clk_get() via clkdev_add_table() in r8a7778_clock_init().
 */
static struct clk_lookup lookups[] = {
/* main */
CLKDEV_CON_ID("shyway_clk", &s_clk),
CLKDEV_CON_ID("peripheral_clk", &p_clk),
/* MSTP32 clocks */
CLKDEV_DEV_ID("sh_mmcif", &mstp_clks[MSTP331]), /* MMC */
CLKDEV_DEV_ID("ffe4e000.mmc", &mstp_clks[MSTP331]), /* MMC */
CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP323]), /* SDHI0 */
CLKDEV_DEV_ID("ffe4c000.sd", &mstp_clks[MSTP323]), /* SDHI0 */
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP322]), /* SDHI1 */
CLKDEV_DEV_ID("ffe4d000.sd", &mstp_clks[MSTP322]), /* SDHI1 */
CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP321]), /* SDHI2 */
CLKDEV_DEV_ID("ffe4f000.sd", &mstp_clks[MSTP321]), /* SDHI2 */
CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */
CLKDEV_DEV_ID("r8a7778-vin.0", &mstp_clks[MSTP110]), /* VIN0 */
CLKDEV_DEV_ID("r8a7778-vin.1", &mstp_clks[MSTP109]), /* VIN1 */
CLKDEV_DEV_ID("ehci-platform", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */
CLKDEV_DEV_ID("ohci-platform", &mstp_clks[MSTP100]), /* USB OHCI port0/1 */
CLKDEV_DEV_ID("renesas_usbhs", &mstp_clks[MSTP100]), /* USB FUNC */
CLKDEV_DEV_ID("i2c-rcar.0", &mstp_clks[MSTP030]), /* I2C0 */
CLKDEV_DEV_ID("ffc70000.i2c", &mstp_clks[MSTP030]), /* I2C0 */
CLKDEV_DEV_ID("i2c-rcar.1", &mstp_clks[MSTP029]), /* I2C1 */
CLKDEV_DEV_ID("ffc71000.i2c", &mstp_clks[MSTP029]), /* I2C1 */
CLKDEV_DEV_ID("i2c-rcar.2", &mstp_clks[MSTP028]), /* I2C2 */
CLKDEV_DEV_ID("ffc72000.i2c", &mstp_clks[MSTP028]), /* I2C2 */
CLKDEV_DEV_ID("i2c-rcar.3", &mstp_clks[MSTP027]), /* I2C3 */
CLKDEV_DEV_ID("ffc73000.i2c", &mstp_clks[MSTP027]), /* I2C3 */
CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP026]), /* SCIF0 */
CLKDEV_DEV_ID("ffe40000.serial", &mstp_clks[MSTP026]), /* SCIF0 */
CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP025]), /* SCIF1 */
CLKDEV_DEV_ID("ffe41000.serial", &mstp_clks[MSTP025]), /* SCIF1 */
CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP024]), /* SCIF2 */
CLKDEV_DEV_ID("ffe42000.serial", &mstp_clks[MSTP024]), /* SCIF2 */
CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP023]), /* SCIF3 */
CLKDEV_DEV_ID("ffe43000.serial", &mstp_clks[MSTP023]), /* SCIF3 */
CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP022]), /* SCIF4 */
CLKDEV_DEV_ID("ffe44000.serial", &mstp_clks[MSTP022]), /* SCIF4 */
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP021]), /* SCIF5 */
CLKDEV_DEV_ID("ffe45000.serial", &mstp_clks[MSTP021]), /* SCIF5 */
CLKDEV_DEV_ID("sh-hspi.0", &mstp_clks[MSTP007]), /* HSPI0 */
CLKDEV_DEV_ID("fffc7000.spi", &mstp_clks[MSTP007]), /* HSPI0 */
CLKDEV_DEV_ID("sh-hspi.1", &mstp_clks[MSTP007]), /* HSPI1 */
CLKDEV_DEV_ID("fffc8000.spi", &mstp_clks[MSTP007]), /* HSPI1 */
CLKDEV_DEV_ID("sh-hspi.2", &mstp_clks[MSTP007]), /* HSPI2 */
CLKDEV_DEV_ID("fffc6000.spi", &mstp_clks[MSTP007]), /* HSPI2 */
CLKDEV_DEV_ID("rcar_sound", &mstp_clks[MSTP008]), /* SRU */
CLKDEV_ICK_ID("clk_a", "rcar_sound", &audio_clk_a),
CLKDEV_ICK_ID("clk_b", "rcar_sound", &audio_clk_b),
CLKDEV_ICK_ID("clk_c", "rcar_sound", &audio_clk_c),
CLKDEV_ICK_ID("clk_i", "rcar_sound", &s1_clk),
CLKDEV_ICK_ID("ssi.0", "rcar_sound", &mstp_clks[MSTP012]),
CLKDEV_ICK_ID("ssi.1", "rcar_sound", &mstp_clks[MSTP011]),
CLKDEV_ICK_ID("ssi.2", "rcar_sound", &mstp_clks[MSTP010]),
CLKDEV_ICK_ID("ssi.3", "rcar_sound", &mstp_clks[MSTP009]),
CLKDEV_ICK_ID("ssi.4", "rcar_sound", &mstp_clks[MSTP311]),
CLKDEV_ICK_ID("ssi.5", "rcar_sound", &mstp_clks[MSTP310]),
CLKDEV_ICK_ID("ssi.6", "rcar_sound", &mstp_clks[MSTP309]),
CLKDEV_ICK_ID("ssi.7", "rcar_sound", &mstp_clks[MSTP308]),
CLKDEV_ICK_ID("ssi.8", "rcar_sound", &mstp_clks[MSTP307]),
CLKDEV_ICK_ID("src.0", "rcar_sound", &mstp_clks[MSTP531]),
CLKDEV_ICK_ID("src.1", "rcar_sound", &mstp_clks[MSTP530]),
CLKDEV_ICK_ID("src.2", "rcar_sound", &mstp_clks[MSTP529]),
CLKDEV_ICK_ID("src.3", "rcar_sound", &mstp_clks[MSTP528]),
CLKDEV_ICK_ID("src.4", "rcar_sound", &mstp_clks[MSTP527]),
CLKDEV_ICK_ID("src.5", "rcar_sound", &mstp_clks[MSTP526]),
CLKDEV_ICK_ID("src.6", "rcar_sound", &mstp_clks[MSTP525]),
CLKDEV_ICK_ID("src.7", "rcar_sound", &mstp_clks[MSTP524]),
CLKDEV_ICK_ID("src.8", "rcar_sound", &mstp_clks[MSTP523]),
CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP016]),
CLKDEV_ICK_ID("fck", "ffd80000.timer", &mstp_clks[MSTP016]),
CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP015]),
CLKDEV_ICK_ID("fck", "ffd81000.timer", &mstp_clks[MSTP015]),
};
/*
 * r8a7778_clock_init - early clock setup for the R8A7778 SoC
 *
 * Reads the boot MODE pins from the MODEMR register to determine the
 * external crystal rate and PLL A/B multipliers, then derives the bus
 * clock divider ratios from MD1/MD2.  Finally registers the main clocks,
 * the MSTP gate clocks and the clkdev lookup table.  Panics on failure,
 * since nothing can run without clocks.
 */
void __init r8a7778_clock_init(void)
{
void __iomem *modemr = ioremap_nocache(MODEMR, PAGE_SIZE);
u32 mode;
int k, ret = 0;
BUG_ON(!modemr);
/* latch the mode pins once; the mapping is not needed afterwards */
mode = ioread32(modemr);
iounmap(modemr);
/* MD19/MD18/MD12/MD11 select EXTAL rate and PLLA/PLLB multipliers */
switch (mode & (MD(19) | MD(18) | MD(12) | MD(11))) {
case MD(19):
extal_clk.rate = 38000000;
SH_CLK_SET_RATIO(&plla_clk_ratio, 21, 1);
SH_CLK_SET_RATIO(&pllb_clk_ratio, 21, 1);
break;
case MD(19) | MD(11):
extal_clk.rate = 33333333;
SH_CLK_SET_RATIO(&plla_clk_ratio, 24, 1);
SH_CLK_SET_RATIO(&pllb_clk_ratio, 24, 1);
break;
case MD(19) | MD(12):
extal_clk.rate = 28500000;
SH_CLK_SET_RATIO(&plla_clk_ratio, 28, 1);
SH_CLK_SET_RATIO(&pllb_clk_ratio, 28, 1);
break;
case MD(19) | MD(12) | MD(11):
extal_clk.rate = 25000000;
SH_CLK_SET_RATIO(&plla_clk_ratio, 32, 1);
SH_CLK_SET_RATIO(&pllb_clk_ratio, 32, 1);
break;
case MD(19) | MD(18) | MD(11):
extal_clk.rate = 33333333;
SH_CLK_SET_RATIO(&plla_clk_ratio, 24, 1);
SH_CLK_SET_RATIO(&pllb_clk_ratio, 21, 1);
break;
case MD(19) | MD(18) | MD(12):
extal_clk.rate = 28500000;
SH_CLK_SET_RATIO(&plla_clk_ratio, 28, 1);
SH_CLK_SET_RATIO(&pllb_clk_ratio, 21, 1);
break;
case MD(19) | MD(18) | MD(12) | MD(11):
extal_clk.rate = 25000000;
SH_CLK_SET_RATIO(&plla_clk_ratio, 32, 1);
SH_CLK_SET_RATIO(&pllb_clk_ratio, 24, 1);
break;
default:
/* unsupported mode-pin combination: cannot derive any clocks */
BUG();
}
/* MD1 selects between two divider tables; MD2 adjusts B/OUT clocks */
if (mode & MD(1)) {
SH_CLK_SET_RATIO(&i_clk_ratio, 1, 1);
SH_CLK_SET_RATIO(&s_clk_ratio, 1, 3);
SH_CLK_SET_RATIO(&s1_clk_ratio, 1, 6);
SH_CLK_SET_RATIO(&s3_clk_ratio, 1, 4);
SH_CLK_SET_RATIO(&s4_clk_ratio, 1, 8);
SH_CLK_SET_RATIO(&p_clk_ratio, 1, 12);
SH_CLK_SET_RATIO(&g_clk_ratio, 1, 12);
if (mode & MD(2)) {
SH_CLK_SET_RATIO(&b_clk_ratio, 1, 18);
SH_CLK_SET_RATIO(&out_clk_ratio, 1, 18);
} else {
SH_CLK_SET_RATIO(&b_clk_ratio, 1, 12);
SH_CLK_SET_RATIO(&out_clk_ratio, 1, 12);
}
} else {
SH_CLK_SET_RATIO(&i_clk_ratio, 1, 1);
SH_CLK_SET_RATIO(&s_clk_ratio, 1, 4);
SH_CLK_SET_RATIO(&s1_clk_ratio, 1, 8);
SH_CLK_SET_RATIO(&s3_clk_ratio, 1, 4);
SH_CLK_SET_RATIO(&s4_clk_ratio, 1, 8);
SH_CLK_SET_RATIO(&p_clk_ratio, 1, 16);
SH_CLK_SET_RATIO(&g_clk_ratio, 1, 12);
if (mode & MD(2)) {
SH_CLK_SET_RATIO(&b_clk_ratio, 1, 16);
SH_CLK_SET_RATIO(&out_clk_ratio, 1, 16);
} else {
SH_CLK_SET_RATIO(&b_clk_ratio, 1, 12);
SH_CLK_SET_RATIO(&out_clk_ratio, 1, 12);
}
}
/* register root clocks first, stopping at the first failure */
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
shmobile_clk_init();
else
panic("failed to setup r8a7778 clocks\n");
}
| gpl-2.0 |
keks2293/kernel_zte | fs/cifs/transport.c | 1855 | 27042 | /*
* fs/cifs/transport.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
* Jeremy Allison (jra@samba.org) 2006.
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
/*
 * Default mid completion callback for synchronous requests: wakes the
 * task that queued the request (stored in mid->callback_data by
 * AllocMidQEntry).
 */
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
wake_up_process(mid->callback_data);
}
/*
 * Allocate and initialize a mid (multiplex id) queue entry for the given
 * SMB request header.  The entry is zeroed, stamped with the current pid
 * and allocation time, and set up for synchronous completion (the default
 * callback wakes the calling task).  Returns NULL on allocation failure
 * or if @server is NULL.  The caller owns the entry and must release it
 * with DeleteMidQEntry()/cifs_delete_mid().
 */
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *mid;

	if (!server) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	mid = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (!mid)
		return NULL;

	memset(mid, 0, sizeof(*mid));
	mid->mid = smb_buffer->Mid;	/* always LE */
	mid->pid = current->pid;
	mid->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* allocation time may precede the time the request is sent */
	mid->when_alloc = jiffies;
	mid->server = server;

	/*
	 * The default is for the mid to be synchronous, so the default
	 * callback just wakes up the current task.
	 */
	mid->callback = cifs_wake_up_task;
	mid->callback_data = current;

	atomic_inc(&midCount);
	mid->mid_state = MID_REQUEST_ALLOCATED;
	return mid;
}
/*
 * Release a mid queue entry: frees the attached response buffer (large or
 * small pool), returns the entry to the mid mempool, and (with
 * CONFIG_CIFS_STATS2) logs commands that took longer than a second.
 * The caller must have already removed the entry from pending_mid_q
 * (see cifs_delete_mid()).
 */
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
__le16 command = midEntry->server->vals->lock_cmd;
unsigned long now;
#endif
midEntry->mid_state = MID_FREE;
atomic_dec(&midCount);
if (midEntry->large_buf)
cifs_buf_release(midEntry->resp_buf);
else
cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
now = jiffies;
/* commands taking longer than one second are indications that
   something is wrong, unless it is quite a slow link or server */
if ((now - midEntry->when_alloc) > HZ) {
/* blocking lock commands (lock_cmd) are expected to be slow; skip them */
if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
midEntry->command, midEntry->mid);
printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
now - midEntry->when_alloc,
now - midEntry->when_sent,
now - midEntry->when_received);
}
}
#endif
mempool_free(midEntry, cifs_mid_poolp);
}
/*
 * Unlink a mid from the server's pending queue (under GlobalMid_Lock)
 * and free it.  Use this instead of DeleteMidQEntry() for mids that were
 * queued on pending_mid_q.
 */
void
cifs_delete_mid(struct mid_q_entry *mid)
{
spin_lock(&GlobalMid_Lock);
list_del(&mid->qhead);
spin_unlock(&GlobalMid_Lock);
DeleteMidQEntry(mid);
}
/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @iov: Pointer to array of kvecs
 * @n_vec: length of kvec array
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 *
 * NOTE: on partial sends this function advances iov_base/iov_len of the
 * caller's kvec array in place.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
size_t *sent)
{
int rc = 0;
int i = 0;
struct msghdr smb_msg;
unsigned int remaining;
size_t first_vec = 0;
struct socket *ssocket = server->ssocket;
*sent = 0;
smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
smb_msg.msg_namelen = sizeof(struct sockaddr);
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
/* nonblocking sends retry with backoff below instead of blocking */
if (server->noblocksnd)
smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
else
smb_msg.msg_flags = MSG_NOSIGNAL;
/* total number of bytes left to push to the socket */
remaining = 0;
for (i = 0; i < n_vec; i++)
remaining += iov[i].iov_len;
i = 0;
while (remaining) {
/*
 * If blocking send, we try 3 times, since each can block
 * for 5 seconds. For nonblocking  we have to try more
 * but wait increasing amounts of time allowing time for
 * socket to clear.  The overall time we wait in either
 * case to send on the socket is about 15 seconds.
 * Similarly we wait for 15 seconds for a response from
 * the server in SendReceive[2] for the server to send
 * a response back for most types of requests (except
 * SMB Write past end of file which can be slow, and
 * blocking lock operations). NFS waits slightly longer
 * than CIFS, but this can make it take longer for
 * nonresponsive servers to be detected and 15 seconds
 * is more than enough time for modern networks to
 * send a packet.  In most cases if we fail to send
 * after the retries we will kill the socket and
 * reconnect which may clear the network problem.
 */
rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
n_vec - first_vec, remaining);
if (rc == -EAGAIN) {
/* exponential backoff: 2ms, 4ms, ... capped by the retry count */
i++;
if (i >= 14 || (!server->noblocksnd && (i > 2))) {
cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
ssocket);
rc = -EAGAIN;
break;
}
msleep(1 << i);
continue;
}
if (rc < 0)
break;
/* send was at least partially successful */
*sent += rc;
if (rc == remaining) {
remaining = 0;
break;
}
if (rc > remaining) {
/* should be impossible: kernel_sendmsg sent more than asked */
cifs_dbg(VFS, "sent %d requested %d\n", rc, remaining);
break;
}
if (rc == 0) {
/* should never happen, letting socket clear before
   retrying is our only obvious option here */
cifs_dbg(VFS, "tcp sent no data\n");
msleep(500);
continue;
}
remaining -= rc;
/* consume rc bytes from the kvec array; the line below resets i */
for (i = first_vec; i < n_vec; i++) {
if (iov[i].iov_len) {
if (rc > iov[i].iov_len) {
rc -= iov[i].iov_len;
iov[i].iov_len = 0;
} else {
iov[i].iov_base += rc;
iov[i].iov_len -= rc;
first_vec = i;
break;
}
}
}
i = 0; /* in case we get ENOSPC on the next send */
rc = 0;
}
return rc;
}
/**
 * cifs_rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
 * @rqst: pointer to smb_rqst
 * @idx: index into the array of the page
 * @iov: pointer to struct kvec that will hold the result
 *
 * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
 * The page will be kmapped and the address placed into iov_base. The length
 * will then be adjusted according to rq_tailsz for the final page.
 * The caller must kunmap() the page when done with the kvec.
 */
void
cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
struct kvec *iov)
{
/*
 * FIXME: We could avoid this kmap altogether if we used
 * kernel_sendpage instead of kernel_sendmsg. That will only
 * work if signing is disabled though as sendpage inlines the
 * page directly into the fraglist. If userspace modifies the
 * page after we calculate the signature, then the server will
 * reject it and may break the connection. kernel_sendmsg does
 * an extra copy of the data and avoids that issue.
 */
iov->iov_base = kmap(rqst->rq_pages[idx]);
/* if last page, don't send beyond this offset into page */
if (idx == (rqst->rq_npages - 1))
iov->iov_len = rqst->rq_tailsz;
else
iov->iov_len = rqst->rq_pagesz;
}
/*
 * Send a full smb_rqst (header kvecs plus any attached pages) on the
 * server socket.  The socket is corked for the duration so the request
 * goes out in as few segments as possible.  If only part of the SMB was
 * transmitted, the session is marked CifsNeedReconnect so the server
 * discards the partial frame.  Returns 0 on success or a negative errno
 * (-EINTR is passed through without logging).
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
int rc;
struct kvec *iov = rqst->rq_iov;
int n_vec = rqst->rq_nvec;
unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
unsigned int i;
size_t total_len = 0, sent;
struct socket *ssocket = server->ssocket;
int val = 1;
if (ssocket == NULL)
return -ENOTSOCK;
cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
dump_smb(iov[0].iov_base, iov[0].iov_len);
/* cork the socket */
kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
(char *)&val, sizeof(val));
rc = smb_send_kvec(server, iov, n_vec, &sent);
if (rc < 0)
goto uncork;
total_len += sent;
/* now walk the page array and send each page in it */
for (i = 0; i < rqst->rq_npages; i++) {
struct kvec p_iov;
cifs_rqst_page_to_kvec(rqst, i, &p_iov);
rc = smb_send_kvec(server, &p_iov, 1, &sent);
kunmap(rqst->rq_pages[i]);
if (rc < 0)
break;
total_len += sent;
}
uncork:
/* uncork it */
val = 0;
kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
(char *)&val, sizeof(val));
/* "+ 4" accounts for the RFC1002 length field preceding the SMB */
if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
smb_buf_length + 4, total_len);
/*
 * If we have only sent part of an SMB then the next SMB could
 * be taken as the remainder of this one. We need to kill the
 * socket so the server throws away the partial SMB
 */
server->tcpStatus = CifsNeedReconnect;
}
if (rc < 0 && rc != -EINTR)
cifs_dbg(VFS, "Error %d sending data on socket to server\n",
rc);
else
rc = 0;
return rc;
}
/*
 * Convenience wrapper: wrap a bare kvec array in an smb_rqst (with no
 * page array) and hand it to smb_send_rqst().
 */
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	struct smb_rqst rqst = { 0 };

	rqst.rq_iov = iov;
	rqst.rq_nvec = n_vec;
	return smb_send_rqst(server, &rqst);
}
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
unsigned int smb_buf_length)
{
struct kvec iov;
iov.iov_base = smb_buffer;
iov.iov_len = smb_buf_length + 4;
return smb_sendv(server, &iov, 1);
}
/*
 * Wait until a send credit is available on @server, then consume one
 * (decrement *credits, bump in_flight) under req_lock.
 *
 * CIFS_ASYNC_OP (e.g. oplock breaks) must never be held up, so a credit
 * is taken immediately.  CIFS_BLOCKING_OP does not consume a credit at
 * all, since blocking lock requests may legitimately stall on the server.
 * Returns 0, -ERESTARTSYS if the killable wait was interrupted, or
 * -ENOENT if the tcp session is tearing down.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
int *credits)
{
int rc;
spin_lock(&server->req_lock);
if (timeout == CIFS_ASYNC_OP) {
/* oplock breaks must not be held up */
server->in_flight++;
*credits -= 1;
spin_unlock(&server->req_lock);
return 0;
}
while (1) {
if (*credits <= 0) {
/* out of credits: drop the lock and sleep until one shows up */
spin_unlock(&server->req_lock);
cifs_num_waiters_inc(server);
rc = wait_event_killable(server->request_q,
has_credits(server, credits));
cifs_num_waiters_dec(server);
if (rc)
return rc;
/* re-take the lock and re-check; credits may be gone again */
spin_lock(&server->req_lock);
} else {
if (server->tcpStatus == CifsExiting) {
spin_unlock(&server->req_lock);
return -ENOENT;
}
/*
 * Can not count locking commands against total
 * as they are allowed to block on server.
 */
/* update # of requests on the wire to server */
if (timeout != CIFS_BLOCKING_OP) {
*credits -= 1;
server->in_flight++;
}
spin_unlock(&server->req_lock);
break;
}
}
return 0;
}
/*
 * Wait for a free request slot, picking the credit counter appropriate
 * for the operation type (normal vs echo/oplock) via the server ops.
 */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
const int optype)
{
return wait_for_free_credits(server, timeout,
server->ops->get_credits_field(server, optype));
}
/*
 * Allocate a mid for @in_buf and queue it on the server's pending list.
 *
 * Fails with -ENOENT if the tcp session is exiting, -EAGAIN if the
 * session is dead (caller should retry after reconnect) or if the SMB
 * session is not yet established and this is not a setup/negotiate
 * command, and -ENOMEM on allocation failure.  On success *ppmidQ holds
 * the new entry and 0 is returned.
 */
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
struct mid_q_entry **ppmidQ)
{
	struct mid_q_entry *mid;

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	/*
	 * While the SMB session is being set up, only session-setup and
	 * negotiate commands may pass through.
	 */
	if (ses->status != CifsGood &&
	    in_buf->Command != SMB_COM_SESSION_SETUP_ANDX &&
	    in_buf->Command != SMB_COM_NEGOTIATE)
		return -EAGAIN;

	mid = AllocMidQEntry(in_buf, ses->server);
	*ppmidQ = mid;
	if (mid == NULL)
		return -ENOMEM;

	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}
/*
 * Sleep (freezable, killable) until the mid leaves MID_REQUEST_SUBMITTED,
 * i.e. a response arrived or the connection state changed it.  Returns 0
 * on wakeup, -ERESTARTSYS if the wait was interrupted by a fatal signal.
 */
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int rc;

	rc = wait_event_freezekillable_unsafe(server->response_q,
			midQ->mid_state != MID_REQUEST_SUBMITTED);
	return (rc < 0) ? -ERESTARTSYS : 0;
}
/*
 * Prepare an async request: set the signing flag in the header if the
 * server requires/enables signing, allocate a mid (NOT queued on
 * pending_mid_q -- the caller does that) and sign the request.
 * Returns the mid or an ERR_PTR on failure.
 */
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
int rc;
struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
/* enable signing if server requires it */
if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
mid = AllocMidQEntry(hdr, server);
if (mid == NULL)
return ERR_PTR(-ENOMEM);
rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
if (rc) {
/* plain DeleteMidQEntry: the mid was never added to pending_mid_q */
DeleteMidQEntry(mid);
return ERR_PTR(rc);
}
return mid;
}
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * On success the mid stays queued and @callback will be invoked with
 * @cbdata when the response (or a connection event) arrives.  On failure
 * the mid is torn down and the consumed credit is returned.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
mid_receive_t *receive, mid_callback_t *callback,
void *cbdata, const int flags)
{
int rc, timeout, optype;
struct mid_q_entry *mid;
timeout = flags & CIFS_TIMEOUT_MASK;
optype = flags & CIFS_OP_MASK;
rc = wait_for_free_request(server, timeout, optype);
if (rc)
return rc;
/* srv_mutex serializes signing and sending (sequence numbers) */
mutex_lock(&server->srv_mutex);
mid = server->ops->setup_async_request(server, rqst);
if (IS_ERR(mid)) {
mutex_unlock(&server->srv_mutex);
/* give back the credit taken by wait_for_free_request */
add_credits(server, 1, optype);
wake_up(&server->request_q);
return PTR_ERR(mid);
}
mid->receive = receive;
mid->callback = callback;
mid->callback_data = cbdata;
mid->mid_state = MID_REQUEST_SUBMITTED;
/* put it on the pending_mid_q */
spin_lock(&GlobalMid_Lock);
list_add_tail(&mid->qhead, &server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
cifs_in_send_inc(server);
rc = smb_send_rqst(server, rqst);
cifs_in_send_dec(server);
cifs_save_when_sent(mid);
if (rc < 0)
/* undo the sequence-number advance done while signing */
server->sequence_number -= 2;
mutex_unlock(&server->srv_mutex);
if (rc == 0)
return 0;
cifs_delete_mid(mid);
add_credits(server, 1, optype);
wake_up(&server->request_q);
return rc;
}
/*
 *
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
char *in_buf, int flags)
{
	struct kvec iov[1] = { {
		.iov_base = in_buf,
		.iov_len = get_rfc1002_length(in_buf) + 4,
	} };
	int resp_buf_type;
	int rc;

	/* tell SendReceive2 to leave the response buffer alone */
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}
/*
 * Map the final state of a synchronous mid to an errno and free the mid.
 * MID_RESPONSE_RECEIVED -> 0 (early return, mid NOT freed -- the caller
 * still needs resp_buf); retry/malformed/shutdown map to -EAGAIN/-EIO/
 * -EHOSTDOWN.  Any other state is a bug: the mid is unlinked and -EIO
 * returned.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
int rc = 0;
cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
__func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
spin_lock(&GlobalMid_Lock);
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
/* success: leave the mid alive so the caller can consume resp_buf */
spin_unlock(&GlobalMid_Lock);
return rc;
case MID_RETRY_NEEDED:
rc = -EAGAIN;
break;
case MID_RESPONSE_MALFORMED:
rc = -EIO;
break;
case MID_SHUTDOWN:
rc = -EHOSTDOWN;
break;
default:
list_del_init(&mid->qhead);
cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
__func__, mid->mid, mid->mid_state);
rc = -EIO;
}
spin_unlock(&GlobalMid_Lock);
DeleteMidQEntry(mid);
return rc;
}
/*
 * Ask the server-specific ops to cancel an in-flight request; a dialect
 * with no cancel support simply reports success.
 */
static inline int
send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
{
	if (server->ops->send_cancel)
		return server->ops->send_cancel(server, buf, mid);
	return 0;
}
/*
 * Validate a received response: dump (up to 92 bytes of) the frame,
 * verify the SMB signature when signing is on (failure is only logged,
 * see FIXME), and map the SMB status in the response to a POSIX errno.
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
bool log_error)
{
unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
dump_smb(mid->resp_buf, min_t(u32, 92, len));
/* convert the length into a more usable form */
if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
struct kvec iov;
int rc = 0;
struct smb_rqst rqst = { .rq_iov = &iov,
.rq_nvec = 1 };
iov.iov_base = mid->resp_buf;
iov.iov_len = len;
/* FIXME: add code to kill session */
rc = cifs_verify_signature(&rqst, server,
mid->sequence_number);
if (rc)
cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
rc);
}
/* BB special case reconnect tid and uid here? */
return map_smb_to_linux_error(mid->resp_buf, log_error);
}
/*
 * Prepare a synchronous request: allocate a mid (queued on
 * pending_mid_q by allocate_mid) and sign the request.  On signing
 * failure the mid is unlinked and freed.  Returns the mid or ERR_PTR.
 */
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;
	int rc;

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);

	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc == 0)
		return mid;

	/* mid is on pending_mid_q, so use cifs_delete_mid to unlink it */
	cifs_delete_mid(mid);
	return ERR_PTR(rc);
}
/*
 * Send a request built from a kvec array and wait for the response.
 *
 * On return *resp_buf_type says whether iov[0] now points at a small or
 * large response buffer (or none).  Ownership note: this function always
 * releases the caller's small request buffer (iov[0].iov_base at entry)
 * exactly once on every path, via cifs_small_buf_release().  Unless
 * CIFS_NO_RESP was set, the response buffer is detached from the mid so
 * the caller owns it after return.
 */
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
const int flags)
{
int rc = 0;
int timeout, optype;
struct mid_q_entry *midQ;
char *buf = iov[0].iov_base;
unsigned int credits = 1;
struct smb_rqst rqst = { .rq_iov = iov,
.rq_nvec = n_vec };
timeout = flags & CIFS_TIMEOUT_MASK;
optype = flags & CIFS_OP_MASK;
*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */
if ((ses == NULL) || (ses->server == NULL)) {
cifs_small_buf_release(buf);
cifs_dbg(VFS, "Null session\n");
return -EIO;
}
if (ses->server->tcpStatus == CifsExiting) {
cifs_small_buf_release(buf);
return -ENOENT;
}
/*
 * Ensure that we do not send more than 50 overlapping requests
 * to the same server. We may make this configurable later or
 * use ses->maxReq.
 */
rc = wait_for_free_request(ses->server, timeout, optype);
if (rc) {
cifs_small_buf_release(buf);
return rc;
}
/*
 * Make sure that we sign in the same order that we send on this socket
 * and avoid races inside tcp sendmsg code that could cause corruption
 * of smb data.
 */
mutex_lock(&ses->server->srv_mutex);
midQ = ses->server->ops->setup_request(ses, &rqst);
if (IS_ERR(midQ)) {
mutex_unlock(&ses->server->srv_mutex);
cifs_small_buf_release(buf);
/* Update # of requests on wire to server */
add_credits(ses->server, 1, optype);
return PTR_ERR(midQ);
}
midQ->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(ses->server);
rc = smb_sendv(ses->server, iov, n_vec);
cifs_in_send_dec(ses->server);
cifs_save_when_sent(midQ);
if (rc < 0)
/* undo the sequence-number advance from signing */
ses->server->sequence_number -= 2;
mutex_unlock(&ses->server->srv_mutex);
if (rc < 0) {
cifs_small_buf_release(buf);
goto out;
}
if (timeout == CIFS_ASYNC_OP) {
/* fire-and-forget: do not wait for the response */
cifs_small_buf_release(buf);
goto out;
}
rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
send_cancel(ses->server, buf, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* still in flight: let the demultiplex thread free the mid */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
cifs_small_buf_release(buf);
add_credits(ses->server, 1, optype);
return rc;
}
spin_unlock(&GlobalMid_Lock);
}
cifs_small_buf_release(buf);
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
/* mid already freed by cifs_sync_mid_result on error */
add_credits(ses->server, 1, optype);
return rc;
}
if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
rc = -EIO;
cifs_dbg(FYI, "Bad MID state?\n");
goto out;
}
/* hand the response buffer back through iov[0] */
buf = (char *)midQ->resp_buf;
iov[0].iov_base = buf;
iov[0].iov_len = get_rfc1002_length(buf) + 4;
if (midQ->large_buf)
*resp_buf_type = CIFS_LARGE_BUFFER;
else
*resp_buf_type = CIFS_SMALL_BUFFER;
credits = ses->server->ops->get_credits(midQ);
rc = ses->server->ops->check_receive(midQ, ses->server,
flags & CIFS_LOG_ERROR);
/* mark it so buf will not be freed by cifs_delete_mid */
if ((flags & CIFS_NO_RESP) == 0)
midQ->resp_buf = NULL;
out:
cifs_delete_mid(midQ);
add_credits(ses->server, credits, optype);
return rc;
}
/*
 * Legacy synchronous send/receive for callers with pre-built SMB
 * buffers.  The response is copied into @out_buf (which the caller
 * sized; *pbytes_returned gets the RFC1002 length of the reply).
 * Rejects frames larger than the negotiated maximum.  A credit is
 * consumed on entry and returned on every exit path.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
int *pbytes_returned, const int timeout)
{
int rc = 0;
struct mid_q_entry *midQ;
if (ses == NULL) {
cifs_dbg(VFS, "Null smb session\n");
return -EIO;
}
if (ses->server == NULL) {
cifs_dbg(VFS, "Null tcp session\n");
return -EIO;
}
if (ses->server->tcpStatus == CifsExiting)
return -ENOENT;
/* Ensure that we do not send more than 50 overlapping requests
   to the same server. We may make this configurable later or
   use ses->maxReq */
if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
MAX_CIFS_HDR_SIZE - 4) {
cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
be32_to_cpu(in_buf->smb_buf_length));
return -EIO;
}
rc = wait_for_free_request(ses->server, timeout, 0);
if (rc)
return rc;
/* make sure that we sign in the same order that we send on this socket
   and avoid races inside tcp sendmsg code that could cause corruption
   of smb data */
mutex_lock(&ses->server->srv_mutex);
rc = allocate_mid(ses, in_buf, &midQ);
if (rc) {
mutex_unlock(&ses->server->srv_mutex);
/* Update # of requests on wire to server */
add_credits(ses->server, 1, 0);
return rc;
}
rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
if (rc) {
mutex_unlock(&ses->server->srv_mutex);
goto out;
}
midQ->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(ses->server);
rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
cifs_in_send_dec(ses->server);
cifs_save_when_sent(midQ);
if (rc < 0)
/* undo the sequence-number advance from signing */
ses->server->sequence_number -= 2;
mutex_unlock(&ses->server->srv_mutex);
if (rc < 0)
goto out;
if (timeout == CIFS_ASYNC_OP)
goto out;
rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
send_cancel(ses->server, in_buf, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
add_credits(ses->server, 1, 0);
return rc;
}
spin_unlock(&GlobalMid_Lock);
}
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
/* mid already freed by cifs_sync_mid_result on error */
add_credits(ses->server, 1, 0);
return rc;
}
if (!midQ->resp_buf || !out_buf ||
midQ->mid_state != MID_RESPONSE_RECEIVED) {
rc = -EIO;
cifs_dbg(VFS, "Bad MID state?\n");
goto out;
}
/* copy the whole frame: RFC1002 length field (4 bytes) + payload */
*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
rc = cifs_check_receive(midQ, ses->server, 0);
out:
cifs_delete_mid(midQ);
add_credits(ses->server, 1, 0);
return rc;
}
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
struct smb_hdr *in_buf,
struct smb_hdr *out_buf)
{
int bytes_returned;
struct cifs_ses *ses = tcon->ses;
LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
/* We just modify the current in_buf to change
   the type of lock from LOCKING_ANDX_SHARED_LOCK
   or LOCKING_ANDX_EXCLUSIVE_LOCK to
   LOCKING_ANDX_CANCEL_LOCK. */
pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
pSMB->Timeout = 0;
/* the cancel is a new request and needs its own mid */
pSMB->hdr.Mid = get_next_mid(ses->server);
return SendReceive(xid, ses, in_buf, out_buf,
&bytes_returned, 0);
}
/*
 * Like SendReceive(), but for blocking byte-range lock requests, which
 * may legitimately stall on the server indefinitely (so no credit is
 * consumed -- CIFS_BLOCKING_OP).  If a signal interrupts the wait, a
 * cancel is sent (NT_CANCEL for POSIX/TRANSACTION2 locks, a
 * LOCKINGX_CANCEL_LOCK for Windows locks) and, once the original lock
 * finally completes, -ERESTARTSYS is returned (if the lock came back
 * -EACCES) so the system call restarts.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
int *pbytes_returned)
{
int rc = 0;
int rstart = 0;
struct mid_q_entry *midQ;
struct cifs_ses *ses;
if (tcon == NULL || tcon->ses == NULL) {
cifs_dbg(VFS, "Null smb session\n");
return -EIO;
}
ses = tcon->ses;
if (ses->server == NULL) {
cifs_dbg(VFS, "Null tcp session\n");
return -EIO;
}
if (ses->server->tcpStatus == CifsExiting)
return -ENOENT;
/* Ensure that we do not send more than 50 overlapping requests
   to the same server. We may make this configurable later or
   use ses->maxReq */
if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
MAX_CIFS_HDR_SIZE - 4) {
cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
be32_to_cpu(in_buf->smb_buf_length));
return -EIO;
}
rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
if (rc)
return rc;
/* make sure that we sign in the same order that we send on this socket
   and avoid races inside tcp sendmsg code that could cause corruption
   of smb data */
mutex_lock(&ses->server->srv_mutex);
rc = allocate_mid(ses, in_buf, &midQ);
if (rc) {
mutex_unlock(&ses->server->srv_mutex);
return rc;
}
rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
if (rc) {
cifs_delete_mid(midQ);
mutex_unlock(&ses->server->srv_mutex);
return rc;
}
midQ->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(ses->server);
rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
cifs_in_send_dec(ses->server);
cifs_save_when_sent(midQ);
if (rc < 0)
/* undo the sequence-number advance from signing */
ses->server->sequence_number -= 2;
mutex_unlock(&ses->server->srv_mutex);
if (rc < 0) {
cifs_delete_mid(midQ);
return rc;
}
/* Wait for a reply - allow signals to interrupt. */
rc = wait_event_interruptible(ses->server->response_q,
(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
((ses->server->tcpStatus != CifsGood) &&
(ses->server->tcpStatus != CifsNew)));
/* Were we interrupted by a signal ? */
if ((rc == -ERESTARTSYS) &&
(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
((ses->server->tcpStatus == CifsGood) ||
(ses->server->tcpStatus == CifsNew))) {
if (in_buf->Command == SMB_COM_TRANSACTION2) {
/* POSIX lock. We send a NT_CANCEL SMB to cause the
   blocking lock to return. */
rc = send_cancel(ses->server, in_buf, midQ);
if (rc) {
cifs_delete_mid(midQ);
return rc;
}
} else {
/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
   to cause the blocking lock to return. */
rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
/* If we get -ENOLCK back the lock may have
   already been removed. Don't exit in this case. */
if (rc && rc != -ENOLCK) {
cifs_delete_mid(midQ);
return rc;
}
}
/* wait (uninterruptibly this time) for the original lock reply */
rc = wait_for_response(ses->server, midQ);
if (rc) {
send_cancel(ses->server, in_buf, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
return rc;
}
spin_unlock(&GlobalMid_Lock);
}
/* We got the response - restart system call. */
rstart = 1;
}
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0)
return rc;
/* rcvd frame is ok */
if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
rc = -EIO;
cifs_dbg(VFS, "Bad MID state?\n");
goto out;
}
/* copy the whole frame: RFC1002 length field (4 bytes) + payload */
*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
rc = cifs_check_receive(midQ, ses->server, 0);
out:
cifs_delete_mid(midQ);
if (rstart && rc == -EACCES)
return -ERESTARTSYS;
return rc;
}
| gpl-2.0 |
mdeejay/virtuous-s4v-kernel | sound/soc/codecs/wm8994-tables.c | 2367 | 98016 | #include "wm8994.h"
const struct wm8994_access_mask wm8994_access_masks[WM8994_CACHE_SIZE] = {
{ 0xFFFF, 0xFFFF }, /* R0 - Software Reset */
{ 0x3B37, 0x3B37 }, /* R1 - Power Management (1) */
{ 0x6BF0, 0x6BF0 }, /* R2 - Power Management (2) */
{ 0x3FF0, 0x3FF0 }, /* R3 - Power Management (3) */
{ 0x3F3F, 0x3F3F }, /* R4 - Power Management (4) */
{ 0x3F0F, 0x3F0F }, /* R5 - Power Management (5) */
{ 0x003F, 0x003F }, /* R6 - Power Management (6) */
{ 0x0000, 0x0000 }, /* R7 */
{ 0x0000, 0x0000 }, /* R8 */
{ 0x0000, 0x0000 }, /* R9 */
{ 0x0000, 0x0000 }, /* R10 */
{ 0x0000, 0x0000 }, /* R11 */
{ 0x0000, 0x0000 }, /* R12 */
{ 0x0000, 0x0000 }, /* R13 */
{ 0x0000, 0x0000 }, /* R14 */
{ 0x0000, 0x0000 }, /* R15 */
{ 0x0000, 0x0000 }, /* R16 */
{ 0x0000, 0x0000 }, /* R17 */
{ 0x0000, 0x0000 }, /* R18 */
{ 0x0000, 0x0000 }, /* R19 */
{ 0x0000, 0x0000 }, /* R20 */
{ 0x01C0, 0x01C0 }, /* R21 - Input Mixer (1) */
{ 0x0000, 0x0000 }, /* R22 */
{ 0x0000, 0x0000 }, /* R23 */
{ 0x00DF, 0x01DF }, /* R24 - Left Line Input 1&2 Volume */
{ 0x00DF, 0x01DF }, /* R25 - Left Line Input 3&4 Volume */
{ 0x00DF, 0x01DF }, /* R26 - Right Line Input 1&2 Volume */
{ 0x00DF, 0x01DF }, /* R27 - Right Line Input 3&4 Volume */
{ 0x00FF, 0x01FF }, /* R28 - Left Output Volume */
{ 0x00FF, 0x01FF }, /* R29 - Right Output Volume */
{ 0x0077, 0x0077 }, /* R30 - Line Outputs Volume */
{ 0x0030, 0x0030 }, /* R31 - HPOUT2 Volume */
{ 0x00FF, 0x01FF }, /* R32 - Left OPGA Volume */
{ 0x00FF, 0x01FF }, /* R33 - Right OPGA Volume */
{ 0x007F, 0x007F }, /* R34 - SPKMIXL Attenuation */
{ 0x017F, 0x017F }, /* R35 - SPKMIXR Attenuation */
{ 0x003F, 0x003F }, /* R36 - SPKOUT Mixers */
{ 0x003F, 0x003F }, /* R37 - ClassD */
{ 0x00FF, 0x01FF }, /* R38 - Speaker Volume Left */
{ 0x00FF, 0x01FF }, /* R39 - Speaker Volume Right */
{ 0x00FF, 0x00FF }, /* R40 - Input Mixer (2) */
{ 0x01B7, 0x01B7 }, /* R41 - Input Mixer (3) */
{ 0x01B7, 0x01B7 }, /* R42 - Input Mixer (4) */
{ 0x01C7, 0x01C7 }, /* R43 - Input Mixer (5) */
{ 0x01C7, 0x01C7 }, /* R44 - Input Mixer (6) */
{ 0x01FF, 0x01FF }, /* R45 - Output Mixer (1) */
{ 0x01FF, 0x01FF }, /* R46 - Output Mixer (2) */
{ 0x0FFF, 0x0FFF }, /* R47 - Output Mixer (3) */
{ 0x0FFF, 0x0FFF }, /* R48 - Output Mixer (4) */
{ 0x0FFF, 0x0FFF }, /* R49 - Output Mixer (5) */
{ 0x0FFF, 0x0FFF }, /* R50 - Output Mixer (6) */
{ 0x0038, 0x0038 }, /* R51 - HPOUT2 Mixer */
{ 0x0077, 0x0077 }, /* R52 - Line Mixer (1) */
{ 0x0077, 0x0077 }, /* R53 - Line Mixer (2) */
{ 0x03FF, 0x03FF }, /* R54 - Speaker Mixer */
{ 0x00C1, 0x00C1 }, /* R55 - Additional Control */
{ 0x00F0, 0x00F0 }, /* R56 - AntiPOP (1) */
{ 0x01EF, 0x01EF }, /* R57 - AntiPOP (2) */
{ 0x00FF, 0x00FF }, /* R58 - MICBIAS */
{ 0x000F, 0x000F }, /* R59 - LDO 1 */
{ 0x0007, 0x0007 }, /* R60 - LDO 2 */
{ 0xFFFF, 0xFFFF }, /* R61 */
{ 0xFFFF, 0xFFFF }, /* R62 */
{ 0x0000, 0x0000 }, /* R63 */
{ 0x0000, 0x0000 }, /* R64 */
{ 0x0000, 0x0000 }, /* R65 */
{ 0x0000, 0x0000 }, /* R66 */
{ 0x0000, 0x0000 }, /* R67 */
{ 0x0000, 0x0000 }, /* R68 */
{ 0x0000, 0x0000 }, /* R69 */
{ 0x0000, 0x0000 }, /* R70 */
{ 0x0000, 0x0000 }, /* R71 */
{ 0x0000, 0x0000 }, /* R72 */
{ 0x0000, 0x0000 }, /* R73 */
{ 0x0000, 0x0000 }, /* R74 */
{ 0x0000, 0x0000 }, /* R75 */
{ 0x8000, 0x8000 }, /* R76 - Charge Pump (1) */
{ 0x0000, 0x0000 }, /* R77 */
{ 0x0000, 0x0000 }, /* R78 */
{ 0x0000, 0x0000 }, /* R79 */
{ 0x0000, 0x0000 }, /* R80 */
{ 0x0301, 0x0301 }, /* R81 - Class W (1) */
{ 0x0000, 0x0000 }, /* R82 */
{ 0x0000, 0x0000 }, /* R83 */
{ 0x333F, 0x333F }, /* R84 - DC Servo (1) */
{ 0x0FEF, 0x0FEF }, /* R85 - DC Servo (2) */
{ 0x0000, 0x0000 }, /* R86 */
{ 0xFFFF, 0xFFFF }, /* R87 - DC Servo (4) */
{ 0x0333, 0x0000 }, /* R88 - DC Servo Readback */
{ 0x0000, 0x0000 }, /* R89 */
{ 0x0000, 0x0000 }, /* R90 */
{ 0x0000, 0x0000 }, /* R91 */
{ 0x0000, 0x0000 }, /* R92 */
{ 0x0000, 0x0000 }, /* R93 */
{ 0x0000, 0x0000 }, /* R94 */
{ 0x0000, 0x0000 }, /* R95 */
{ 0x00EE, 0x00EE }, /* R96 - Analogue HP (1) */
{ 0x0000, 0x0000 }, /* R97 */
{ 0x0000, 0x0000 }, /* R98 */
{ 0x0000, 0x0000 }, /* R99 */
{ 0x0000, 0x0000 }, /* R100 */
{ 0x0000, 0x0000 }, /* R101 */
{ 0x0000, 0x0000 }, /* R102 */
{ 0x0000, 0x0000 }, /* R103 */
{ 0x0000, 0x0000 }, /* R104 */
{ 0x0000, 0x0000 }, /* R105 */
{ 0x0000, 0x0000 }, /* R106 */
{ 0x0000, 0x0000 }, /* R107 */
{ 0x0000, 0x0000 }, /* R108 */
{ 0x0000, 0x0000 }, /* R109 */
{ 0x0000, 0x0000 }, /* R110 */
{ 0x0000, 0x0000 }, /* R111 */
{ 0x0000, 0x0000 }, /* R112 */
{ 0x0000, 0x0000 }, /* R113 */
{ 0x0000, 0x0000 }, /* R114 */
{ 0x0000, 0x0000 }, /* R115 */
{ 0x0000, 0x0000 }, /* R116 */
{ 0x0000, 0x0000 }, /* R117 */
{ 0x0000, 0x0000 }, /* R118 */
{ 0x0000, 0x0000 }, /* R119 */
{ 0x0000, 0x0000 }, /* R120 */
{ 0x0000, 0x0000 }, /* R121 */
{ 0x0000, 0x0000 }, /* R122 */
{ 0x0000, 0x0000 }, /* R123 */
{ 0x0000, 0x0000 }, /* R124 */
{ 0x0000, 0x0000 }, /* R125 */
{ 0x0000, 0x0000 }, /* R126 */
{ 0x0000, 0x0000 }, /* R127 */
{ 0x0000, 0x0000 }, /* R128 */
{ 0x0000, 0x0000 }, /* R129 */
{ 0x0000, 0x0000 }, /* R130 */
{ 0x0000, 0x0000 }, /* R131 */
{ 0x0000, 0x0000 }, /* R132 */
{ 0x0000, 0x0000 }, /* R133 */
{ 0x0000, 0x0000 }, /* R134 */
{ 0x0000, 0x0000 }, /* R135 */
{ 0x0000, 0x0000 }, /* R136 */
{ 0x0000, 0x0000 }, /* R137 */
{ 0x0000, 0x0000 }, /* R138 */
{ 0x0000, 0x0000 }, /* R139 */
{ 0x0000, 0x0000 }, /* R140 */
{ 0x0000, 0x0000 }, /* R141 */
{ 0x0000, 0x0000 }, /* R142 */
{ 0x0000, 0x0000 }, /* R143 */
{ 0x0000, 0x0000 }, /* R144 */
{ 0x0000, 0x0000 }, /* R145 */
{ 0x0000, 0x0000 }, /* R146 */
{ 0x0000, 0x0000 }, /* R147 */
{ 0x0000, 0x0000 }, /* R148 */
{ 0x0000, 0x0000 }, /* R149 */
{ 0x0000, 0x0000 }, /* R150 */
{ 0x0000, 0x0000 }, /* R151 */
{ 0x0000, 0x0000 }, /* R152 */
{ 0x0000, 0x0000 }, /* R153 */
{ 0x0000, 0x0000 }, /* R154 */
{ 0x0000, 0x0000 }, /* R155 */
{ 0x0000, 0x0000 }, /* R156 */
{ 0x0000, 0x0000 }, /* R157 */
{ 0x0000, 0x0000 }, /* R158 */
{ 0x0000, 0x0000 }, /* R159 */
{ 0x0000, 0x0000 }, /* R160 */
{ 0x0000, 0x0000 }, /* R161 */
{ 0x0000, 0x0000 }, /* R162 */
{ 0x0000, 0x0000 }, /* R163 */
{ 0x0000, 0x0000 }, /* R164 */
{ 0x0000, 0x0000 }, /* R165 */
{ 0x0000, 0x0000 }, /* R166 */
{ 0x0000, 0x0000 }, /* R167 */
{ 0x0000, 0x0000 }, /* R168 */
{ 0x0000, 0x0000 }, /* R169 */
{ 0x0000, 0x0000 }, /* R170 */
{ 0x0000, 0x0000 }, /* R171 */
{ 0x0000, 0x0000 }, /* R172 */
{ 0x0000, 0x0000 }, /* R173 */
{ 0x0000, 0x0000 }, /* R174 */
{ 0x0000, 0x0000 }, /* R175 */
{ 0x0000, 0x0000 }, /* R176 */
{ 0x0000, 0x0000 }, /* R177 */
{ 0x0000, 0x0000 }, /* R178 */
{ 0x0000, 0x0000 }, /* R179 */
{ 0x0000, 0x0000 }, /* R180 */
{ 0x0000, 0x0000 }, /* R181 */
{ 0x0000, 0x0000 }, /* R182 */
{ 0x0000, 0x0000 }, /* R183 */
{ 0x0000, 0x0000 }, /* R184 */
{ 0x0000, 0x0000 }, /* R185 */
{ 0x0000, 0x0000 }, /* R186 */
{ 0x0000, 0x0000 }, /* R187 */
{ 0x0000, 0x0000 }, /* R188 */
{ 0x0000, 0x0000 }, /* R189 */
{ 0x0000, 0x0000 }, /* R190 */
{ 0x0000, 0x0000 }, /* R191 */
{ 0x0000, 0x0000 }, /* R192 */
{ 0x0000, 0x0000 }, /* R193 */
{ 0x0000, 0x0000 }, /* R194 */
{ 0x0000, 0x0000 }, /* R195 */
{ 0x0000, 0x0000 }, /* R196 */
{ 0x0000, 0x0000 }, /* R197 */
{ 0x0000, 0x0000 }, /* R198 */
{ 0x0000, 0x0000 }, /* R199 */
{ 0x0000, 0x0000 }, /* R200 */
{ 0x0000, 0x0000 }, /* R201 */
{ 0x0000, 0x0000 }, /* R202 */
{ 0x0000, 0x0000 }, /* R203 */
{ 0x0000, 0x0000 }, /* R204 */
{ 0x0000, 0x0000 }, /* R205 */
{ 0x0000, 0x0000 }, /* R206 */
{ 0x0000, 0x0000 }, /* R207 */
{ 0xFFFF, 0xFFFF }, /* R208 */
{ 0xFFFF, 0xFFFF }, /* R209 */
{ 0xFFFF, 0xFFFF }, /* R210 */
{ 0x0000, 0x0000 }, /* R211 */
{ 0x0000, 0x0000 }, /* R212 */
{ 0x0000, 0x0000 }, /* R213 */
{ 0x0000, 0x0000 }, /* R214 */
{ 0x0000, 0x0000 }, /* R215 */
{ 0x0000, 0x0000 }, /* R216 */
{ 0x0000, 0x0000 }, /* R217 */
{ 0x0000, 0x0000 }, /* R218 */
{ 0x0000, 0x0000 }, /* R219 */
{ 0x0000, 0x0000 }, /* R220 */
{ 0x0000, 0x0000 }, /* R221 */
{ 0x0000, 0x0000 }, /* R222 */
{ 0x0000, 0x0000 }, /* R223 */
{ 0x0000, 0x0000 }, /* R224 */
{ 0x0000, 0x0000 }, /* R225 */
{ 0x0000, 0x0000 }, /* R226 */
{ 0x0000, 0x0000 }, /* R227 */
{ 0x0000, 0x0000 }, /* R228 */
{ 0x0000, 0x0000 }, /* R229 */
{ 0x0000, 0x0000 }, /* R230 */
{ 0x0000, 0x0000 }, /* R231 */
{ 0x0000, 0x0000 }, /* R232 */
{ 0x0000, 0x0000 }, /* R233 */
{ 0x0000, 0x0000 }, /* R234 */
{ 0x0000, 0x0000 }, /* R235 */
{ 0x0000, 0x0000 }, /* R236 */
{ 0x0000, 0x0000 }, /* R237 */
{ 0x0000, 0x0000 }, /* R238 */
{ 0x0000, 0x0000 }, /* R239 */
{ 0x0000, 0x0000 }, /* R240 */
{ 0x0000, 0x0000 }, /* R241 */
{ 0x0000, 0x0000 }, /* R242 */
{ 0x0000, 0x0000 }, /* R243 */
{ 0x0000, 0x0000 }, /* R244 */
{ 0x0000, 0x0000 }, /* R245 */
{ 0x0000, 0x0000 }, /* R246 */
{ 0x0000, 0x0000 }, /* R247 */
{ 0x0000, 0x0000 }, /* R248 */
{ 0x0000, 0x0000 }, /* R249 */
{ 0x0000, 0x0000 }, /* R250 */
{ 0x0000, 0x0000 }, /* R251 */
{ 0x0000, 0x0000 }, /* R252 */
{ 0x0000, 0x0000 }, /* R253 */
{ 0x0000, 0x0000 }, /* R254 */
{ 0x0000, 0x0000 }, /* R255 */
{ 0x000F, 0x0000 }, /* R256 - Chip Revision */
{ 0x0074, 0x0074 }, /* R257 - Control Interface */
{ 0x0000, 0x0000 }, /* R258 */
{ 0x0000, 0x0000 }, /* R259 */
{ 0x0000, 0x0000 }, /* R260 */
{ 0x0000, 0x0000 }, /* R261 */
{ 0x0000, 0x0000 }, /* R262 */
{ 0x0000, 0x0000 }, /* R263 */
{ 0x0000, 0x0000 }, /* R264 */
{ 0x0000, 0x0000 }, /* R265 */
{ 0x0000, 0x0000 }, /* R266 */
{ 0x0000, 0x0000 }, /* R267 */
{ 0x0000, 0x0000 }, /* R268 */
{ 0x0000, 0x0000 }, /* R269 */
{ 0x0000, 0x0000 }, /* R270 */
{ 0x0000, 0x0000 }, /* R271 */
{ 0x807F, 0x837F }, /* R272 - Write Sequencer Ctrl (1) */
{ 0x017F, 0x0000 }, /* R273 - Write Sequencer Ctrl (2) */
{ 0x0000, 0x0000 }, /* R274 */
{ 0x0000, 0x0000 }, /* R275 */
{ 0x0000, 0x0000 }, /* R276 */
{ 0x0000, 0x0000 }, /* R277 */
{ 0x0000, 0x0000 }, /* R278 */
{ 0x0000, 0x0000 }, /* R279 */
{ 0x0000, 0x0000 }, /* R280 */
{ 0x0000, 0x0000 }, /* R281 */
{ 0x0000, 0x0000 }, /* R282 */
{ 0x0000, 0x0000 }, /* R283 */
{ 0x0000, 0x0000 }, /* R284 */
{ 0x0000, 0x0000 }, /* R285 */
{ 0x0000, 0x0000 }, /* R286 */
{ 0x0000, 0x0000 }, /* R287 */
{ 0x0000, 0x0000 }, /* R288 */
{ 0x0000, 0x0000 }, /* R289 */
{ 0x0000, 0x0000 }, /* R290 */
{ 0x0000, 0x0000 }, /* R291 */
{ 0x0000, 0x0000 }, /* R292 */
{ 0x0000, 0x0000 }, /* R293 */
{ 0x0000, 0x0000 }, /* R294 */
{ 0x0000, 0x0000 }, /* R295 */
{ 0x0000, 0x0000 }, /* R296 */
{ 0x0000, 0x0000 }, /* R297 */
{ 0x0000, 0x0000 }, /* R298 */
{ 0x0000, 0x0000 }, /* R299 */
{ 0x0000, 0x0000 }, /* R300 */
{ 0x0000, 0x0000 }, /* R301 */
{ 0x0000, 0x0000 }, /* R302 */
{ 0x0000, 0x0000 }, /* R303 */
{ 0x0000, 0x0000 }, /* R304 */
{ 0x0000, 0x0000 }, /* R305 */
{ 0x0000, 0x0000 }, /* R306 */
{ 0x0000, 0x0000 }, /* R307 */
{ 0x0000, 0x0000 }, /* R308 */
{ 0x0000, 0x0000 }, /* R309 */
{ 0x0000, 0x0000 }, /* R310 */
{ 0x0000, 0x0000 }, /* R311 */
{ 0x0000, 0x0000 }, /* R312 */
{ 0x0000, 0x0000 }, /* R313 */
{ 0x0000, 0x0000 }, /* R314 */
{ 0x0000, 0x0000 }, /* R315 */
{ 0x0000, 0x0000 }, /* R316 */
{ 0x0000, 0x0000 }, /* R317 */
{ 0x0000, 0x0000 }, /* R318 */
{ 0x0000, 0x0000 }, /* R319 */
{ 0x0000, 0x0000 }, /* R320 */
{ 0x0000, 0x0000 }, /* R321 */
{ 0x0000, 0x0000 }, /* R322 */
{ 0x0000, 0x0000 }, /* R323 */
{ 0x0000, 0x0000 }, /* R324 */
{ 0x0000, 0x0000 }, /* R325 */
{ 0x0000, 0x0000 }, /* R326 */
{ 0x0000, 0x0000 }, /* R327 */
{ 0x0000, 0x0000 }, /* R328 */
{ 0x0000, 0x0000 }, /* R329 */
{ 0x0000, 0x0000 }, /* R330 */
{ 0x0000, 0x0000 }, /* R331 */
{ 0x0000, 0x0000 }, /* R332 */
{ 0x0000, 0x0000 }, /* R333 */
{ 0x0000, 0x0000 }, /* R334 */
{ 0x0000, 0x0000 }, /* R335 */
{ 0x0000, 0x0000 }, /* R336 */
{ 0x0000, 0x0000 }, /* R337 */
{ 0x0000, 0x0000 }, /* R338 */
{ 0x0000, 0x0000 }, /* R339 */
{ 0x0000, 0x0000 }, /* R340 */
{ 0x0000, 0x0000 }, /* R341 */
{ 0x0000, 0x0000 }, /* R342 */
{ 0x0000, 0x0000 }, /* R343 */
{ 0x0000, 0x0000 }, /* R344 */
{ 0x0000, 0x0000 }, /* R345 */
{ 0x0000, 0x0000 }, /* R346 */
{ 0x0000, 0x0000 }, /* R347 */
{ 0x0000, 0x0000 }, /* R348 */
{ 0x0000, 0x0000 }, /* R349 */
{ 0x0000, 0x0000 }, /* R350 */
{ 0x0000, 0x0000 }, /* R351 */
{ 0x0000, 0x0000 }, /* R352 */
{ 0x0000, 0x0000 }, /* R353 */
{ 0x0000, 0x0000 }, /* R354 */
{ 0x0000, 0x0000 }, /* R355 */
{ 0x0000, 0x0000 }, /* R356 */
{ 0x0000, 0x0000 }, /* R357 */
{ 0x0000, 0x0000 }, /* R358 */
{ 0x0000, 0x0000 }, /* R359 */
{ 0x0000, 0x0000 }, /* R360 */
{ 0x0000, 0x0000 }, /* R361 */
{ 0x0000, 0x0000 }, /* R362 */
{ 0x0000, 0x0000 }, /* R363 */
{ 0x0000, 0x0000 }, /* R364 */
{ 0x0000, 0x0000 }, /* R365 */
{ 0x0000, 0x0000 }, /* R366 */
{ 0x0000, 0x0000 }, /* R367 */
{ 0x0000, 0x0000 }, /* R368 */
{ 0x0000, 0x0000 }, /* R369 */
{ 0x0000, 0x0000 }, /* R370 */
{ 0x0000, 0x0000 }, /* R371 */
{ 0x0000, 0x0000 }, /* R372 */
{ 0x0000, 0x0000 }, /* R373 */
{ 0x0000, 0x0000 }, /* R374 */
{ 0x0000, 0x0000 }, /* R375 */
{ 0x0000, 0x0000 }, /* R376 */
{ 0x0000, 0x0000 }, /* R377 */
{ 0x0000, 0x0000 }, /* R378 */
{ 0x0000, 0x0000 }, /* R379 */
{ 0x0000, 0x0000 }, /* R380 */
{ 0x0000, 0x0000 }, /* R381 */
{ 0x0000, 0x0000 }, /* R382 */
{ 0x0000, 0x0000 }, /* R383 */
{ 0x0000, 0x0000 }, /* R384 */
{ 0x0000, 0x0000 }, /* R385 */
{ 0x0000, 0x0000 }, /* R386 */
{ 0x0000, 0x0000 }, /* R387 */
{ 0x0000, 0x0000 }, /* R388 */
{ 0x0000, 0x0000 }, /* R389 */
{ 0x0000, 0x0000 }, /* R390 */
{ 0x0000, 0x0000 }, /* R391 */
{ 0x0000, 0x0000 }, /* R392 */
{ 0x0000, 0x0000 }, /* R393 */
{ 0x0000, 0x0000 }, /* R394 */
{ 0x0000, 0x0000 }, /* R395 */
{ 0x0000, 0x0000 }, /* R396 */
{ 0x0000, 0x0000 }, /* R397 */
{ 0x0000, 0x0000 }, /* R398 */
{ 0x0000, 0x0000 }, /* R399 */
{ 0x0000, 0x0000 }, /* R400 */
{ 0x0000, 0x0000 }, /* R401 */
{ 0x0000, 0x0000 }, /* R402 */
{ 0x0000, 0x0000 }, /* R403 */
{ 0x0000, 0x0000 }, /* R404 */
{ 0x0000, 0x0000 }, /* R405 */
{ 0x0000, 0x0000 }, /* R406 */
{ 0x0000, 0x0000 }, /* R407 */
{ 0x0000, 0x0000 }, /* R408 */
{ 0x0000, 0x0000 }, /* R409 */
{ 0x0000, 0x0000 }, /* R410 */
{ 0x0000, 0x0000 }, /* R411 */
{ 0x0000, 0x0000 }, /* R412 */
{ 0x0000, 0x0000 }, /* R413 */
{ 0x0000, 0x0000 }, /* R414 */
{ 0x0000, 0x0000 }, /* R415 */
{ 0x0000, 0x0000 }, /* R416 */
{ 0x0000, 0x0000 }, /* R417 */
{ 0x0000, 0x0000 }, /* R418 */
{ 0x0000, 0x0000 }, /* R419 */
{ 0x0000, 0x0000 }, /* R420 */
{ 0x0000, 0x0000 }, /* R421 */
{ 0x0000, 0x0000 }, /* R422 */
{ 0x0000, 0x0000 }, /* R423 */
{ 0x0000, 0x0000 }, /* R424 */
{ 0x0000, 0x0000 }, /* R425 */
{ 0x0000, 0x0000 }, /* R426 */
{ 0x0000, 0x0000 }, /* R427 */
{ 0x0000, 0x0000 }, /* R428 */
{ 0x0000, 0x0000 }, /* R429 */
{ 0x0000, 0x0000 }, /* R430 */
{ 0x0000, 0x0000 }, /* R431 */
{ 0x0000, 0x0000 }, /* R432 */
{ 0x0000, 0x0000 }, /* R433 */
{ 0x0000, 0x0000 }, /* R434 */
{ 0x0000, 0x0000 }, /* R435 */
{ 0x0000, 0x0000 }, /* R436 */
{ 0x0000, 0x0000 }, /* R437 */
{ 0x0000, 0x0000 }, /* R438 */
{ 0x0000, 0x0000 }, /* R439 */
{ 0x0000, 0x0000 }, /* R440 */
{ 0x0000, 0x0000 }, /* R441 */
{ 0x0000, 0x0000 }, /* R442 */
{ 0x0000, 0x0000 }, /* R443 */
{ 0x0000, 0x0000 }, /* R444 */
{ 0x0000, 0x0000 }, /* R445 */
{ 0x0000, 0x0000 }, /* R446 */
{ 0x0000, 0x0000 }, /* R447 */
{ 0x0000, 0x0000 }, /* R448 */
{ 0x0000, 0x0000 }, /* R449 */
{ 0x0000, 0x0000 }, /* R450 */
{ 0x0000, 0x0000 }, /* R451 */
{ 0x0000, 0x0000 }, /* R452 */
{ 0x0000, 0x0000 }, /* R453 */
{ 0x0000, 0x0000 }, /* R454 */
{ 0x0000, 0x0000 }, /* R455 */
{ 0x0000, 0x0000 }, /* R456 */
{ 0x0000, 0x0000 }, /* R457 */
{ 0x0000, 0x0000 }, /* R458 */
{ 0x0000, 0x0000 }, /* R459 */
{ 0x0000, 0x0000 }, /* R460 */
{ 0x0000, 0x0000 }, /* R461 */
{ 0x0000, 0x0000 }, /* R462 */
{ 0x0000, 0x0000 }, /* R463 */
{ 0x0000, 0x0000 }, /* R464 */
{ 0x0000, 0x0000 }, /* R465 */
{ 0x0000, 0x0000 }, /* R466 */
{ 0x0000, 0x0000 }, /* R467 */
{ 0x0000, 0x0000 }, /* R468 */
{ 0x0000, 0x0000 }, /* R469 */
{ 0x0000, 0x0000 }, /* R470 */
{ 0x0000, 0x0000 }, /* R471 */
{ 0x0000, 0x0000 }, /* R472 */
{ 0x0000, 0x0000 }, /* R473 */
{ 0x0000, 0x0000 }, /* R474 */
{ 0x0000, 0x0000 }, /* R475 */
{ 0x0000, 0x0000 }, /* R476 */
{ 0x0000, 0x0000 }, /* R477 */
{ 0x0000, 0x0000 }, /* R478 */
{ 0x0000, 0x0000 }, /* R479 */
{ 0x0000, 0x0000 }, /* R480 */
{ 0x0000, 0x0000 }, /* R481 */
{ 0x0000, 0x0000 }, /* R482 */
{ 0x0000, 0x0000 }, /* R483 */
{ 0x0000, 0x0000 }, /* R484 */
{ 0x0000, 0x0000 }, /* R485 */
{ 0x0000, 0x0000 }, /* R486 */
{ 0x0000, 0x0000 }, /* R487 */
{ 0x0000, 0x0000 }, /* R488 */
{ 0x0000, 0x0000 }, /* R489 */
{ 0x0000, 0x0000 }, /* R490 */
{ 0x0000, 0x0000 }, /* R491 */
{ 0x0000, 0x0000 }, /* R492 */
{ 0x0000, 0x0000 }, /* R493 */
{ 0x0000, 0x0000 }, /* R494 */
{ 0x0000, 0x0000 }, /* R495 */
{ 0x0000, 0x0000 }, /* R496 */
{ 0x0000, 0x0000 }, /* R497 */
{ 0x0000, 0x0000 }, /* R498 */
{ 0x0000, 0x0000 }, /* R499 */
{ 0x0000, 0x0000 }, /* R500 */
{ 0x0000, 0x0000 }, /* R501 */
{ 0x0000, 0x0000 }, /* R502 */
{ 0x0000, 0x0000 }, /* R503 */
{ 0x0000, 0x0000 }, /* R504 */
{ 0x0000, 0x0000 }, /* R505 */
{ 0x0000, 0x0000 }, /* R506 */
{ 0x0000, 0x0000 }, /* R507 */
{ 0x0000, 0x0000 }, /* R508 */
{ 0x0000, 0x0000 }, /* R509 */
{ 0x0000, 0x0000 }, /* R510 */
{ 0x0000, 0x0000 }, /* R511 */
{ 0x001F, 0x001F }, /* R512 - AIF1 Clocking (1) */
{ 0x003F, 0x003F }, /* R513 - AIF1 Clocking (2) */
{ 0x0000, 0x0000 }, /* R514 */
{ 0x0000, 0x0000 }, /* R515 */
{ 0x001F, 0x001F }, /* R516 - AIF2 Clocking (1) */
{ 0x003F, 0x003F }, /* R517 - AIF2 Clocking (2) */
{ 0x0000, 0x0000 }, /* R518 */
{ 0x0000, 0x0000 }, /* R519 */
{ 0x001F, 0x001F }, /* R520 - Clocking (1) */
{ 0x0777, 0x0777 }, /* R521 - Clocking (2) */
{ 0x0000, 0x0000 }, /* R522 */
{ 0x0000, 0x0000 }, /* R523 */
{ 0x0000, 0x0000 }, /* R524 */
{ 0x0000, 0x0000 }, /* R525 */
{ 0x0000, 0x0000 }, /* R526 */
{ 0x0000, 0x0000 }, /* R527 */
{ 0x00FF, 0x00FF }, /* R528 - AIF1 Rate */
{ 0x00FF, 0x00FF }, /* R529 - AIF2 Rate */
{ 0x000F, 0x0000 }, /* R530 - Rate Status */
{ 0x0000, 0x0000 }, /* R531 */
{ 0x0000, 0x0000 }, /* R532 */
{ 0x0000, 0x0000 }, /* R533 */
{ 0x0000, 0x0000 }, /* R534 */
{ 0x0000, 0x0000 }, /* R535 */
{ 0x0000, 0x0000 }, /* R536 */
{ 0x0000, 0x0000 }, /* R537 */
{ 0x0000, 0x0000 }, /* R538 */
{ 0x0000, 0x0000 }, /* R539 */
{ 0x0000, 0x0000 }, /* R540 */
{ 0x0000, 0x0000 }, /* R541 */
{ 0x0000, 0x0000 }, /* R542 */
{ 0x0000, 0x0000 }, /* R543 */
{ 0x0007, 0x0007 }, /* R544 - FLL1 Control (1) */
{ 0x3F77, 0x3F77 }, /* R545 - FLL1 Control (2) */
{ 0xFFFF, 0xFFFF }, /* R546 - FLL1 Control (3) */
{ 0x7FEF, 0x7FEF }, /* R547 - FLL1 Control (4) */
{ 0x1FDB, 0x1FDB }, /* R548 - FLL1 Control (5) */
{ 0x0000, 0x0000 }, /* R549 */
{ 0x0000, 0x0000 }, /* R550 */
{ 0x0000, 0x0000 }, /* R551 */
{ 0x0000, 0x0000 }, /* R552 */
{ 0x0000, 0x0000 }, /* R553 */
{ 0x0000, 0x0000 }, /* R554 */
{ 0x0000, 0x0000 }, /* R555 */
{ 0x0000, 0x0000 }, /* R556 */
{ 0x0000, 0x0000 }, /* R557 */
{ 0x0000, 0x0000 }, /* R558 */
{ 0x0000, 0x0000 }, /* R559 */
{ 0x0000, 0x0000 }, /* R560 */
{ 0x0000, 0x0000 }, /* R561 */
{ 0x0000, 0x0000 }, /* R562 */
{ 0x0000, 0x0000 }, /* R563 */
{ 0x0000, 0x0000 }, /* R564 */
{ 0x0000, 0x0000 }, /* R565 */
{ 0x0000, 0x0000 }, /* R566 */
{ 0x0000, 0x0000 }, /* R567 */
{ 0x0000, 0x0000 }, /* R568 */
{ 0x0000, 0x0000 }, /* R569 */
{ 0x0000, 0x0000 }, /* R570 */
{ 0x0000, 0x0000 }, /* R571 */
{ 0x0000, 0x0000 }, /* R572 */
{ 0x0000, 0x0000 }, /* R573 */
{ 0x0000, 0x0000 }, /* R574 */
{ 0x0000, 0x0000 }, /* R575 */
{ 0x0007, 0x0007 }, /* R576 - FLL2 Control (1) */
{ 0x3F77, 0x3F77 }, /* R577 - FLL2 Control (2) */
{ 0xFFFF, 0xFFFF }, /* R578 - FLL2 Control (3) */
{ 0x7FEF, 0x7FEF }, /* R579 - FLL2 Control (4) */
{ 0x1FDB, 0x1FDB }, /* R580 - FLL2 Control (5) */
{ 0x0000, 0x0000 }, /* R581 */
{ 0x0000, 0x0000 }, /* R582 */
{ 0x0000, 0x0000 }, /* R583 */
{ 0x0000, 0x0000 }, /* R584 */
{ 0x0000, 0x0000 }, /* R585 */
{ 0x0000, 0x0000 }, /* R586 */
{ 0x0000, 0x0000 }, /* R587 */
{ 0x0000, 0x0000 }, /* R588 */
{ 0x0000, 0x0000 }, /* R589 */
{ 0x0000, 0x0000 }, /* R590 */
{ 0x0000, 0x0000 }, /* R591 */
{ 0x0000, 0x0000 }, /* R592 */
{ 0x0000, 0x0000 }, /* R593 */
{ 0x0000, 0x0000 }, /* R594 */
{ 0x0000, 0x0000 }, /* R595 */
{ 0x0000, 0x0000 }, /* R596 */
{ 0x0000, 0x0000 }, /* R597 */
{ 0x0000, 0x0000 }, /* R598 */
{ 0x0000, 0x0000 }, /* R599 */
{ 0x0000, 0x0000 }, /* R600 */
{ 0x0000, 0x0000 }, /* R601 */
{ 0x0000, 0x0000 }, /* R602 */
{ 0x0000, 0x0000 }, /* R603 */
{ 0x0000, 0x0000 }, /* R604 */
{ 0x0000, 0x0000 }, /* R605 */
{ 0x0000, 0x0000 }, /* R606 */
{ 0x0000, 0x0000 }, /* R607 */
{ 0x0000, 0x0000 }, /* R608 */
{ 0x0000, 0x0000 }, /* R609 */
{ 0x0000, 0x0000 }, /* R610 */
{ 0x0000, 0x0000 }, /* R611 */
{ 0x0000, 0x0000 }, /* R612 */
{ 0x0000, 0x0000 }, /* R613 */
{ 0x0000, 0x0000 }, /* R614 */
{ 0x0000, 0x0000 }, /* R615 */
{ 0x0000, 0x0000 }, /* R616 */
{ 0x0000, 0x0000 }, /* R617 */
{ 0x0000, 0x0000 }, /* R618 */
{ 0x0000, 0x0000 }, /* R619 */
{ 0x0000, 0x0000 }, /* R620 */
{ 0x0000, 0x0000 }, /* R621 */
{ 0x0000, 0x0000 }, /* R622 */
{ 0x0000, 0x0000 }, /* R623 */
{ 0x0000, 0x0000 }, /* R624 */
{ 0x0000, 0x0000 }, /* R625 */
{ 0x0000, 0x0000 }, /* R626 */
{ 0x0000, 0x0000 }, /* R627 */
{ 0x0000, 0x0000 }, /* R628 */
{ 0x0000, 0x0000 }, /* R629 */
{ 0x0000, 0x0000 }, /* R630 */
{ 0x0000, 0x0000 }, /* R631 */
{ 0x0000, 0x0000 }, /* R632 */
{ 0x0000, 0x0000 }, /* R633 */
{ 0x0000, 0x0000 }, /* R634 */
{ 0x0000, 0x0000 }, /* R635 */
{ 0x0000, 0x0000 }, /* R636 */
{ 0x0000, 0x0000 }, /* R637 */
{ 0x0000, 0x0000 }, /* R638 */
{ 0x0000, 0x0000 }, /* R639 */
{ 0x0000, 0x0000 }, /* R640 */
{ 0x0000, 0x0000 }, /* R641 */
{ 0x0000, 0x0000 }, /* R642 */
{ 0x0000, 0x0000 }, /* R643 */
{ 0x0000, 0x0000 }, /* R644 */
{ 0x0000, 0x0000 }, /* R645 */
{ 0x0000, 0x0000 }, /* R646 */
{ 0x0000, 0x0000 }, /* R647 */
{ 0x0000, 0x0000 }, /* R648 */
{ 0x0000, 0x0000 }, /* R649 */
{ 0x0000, 0x0000 }, /* R650 */
{ 0x0000, 0x0000 }, /* R651 */
{ 0x0000, 0x0000 }, /* R652 */
{ 0x0000, 0x0000 }, /* R653 */
{ 0x0000, 0x0000 }, /* R654 */
{ 0x0000, 0x0000 }, /* R655 */
{ 0x0000, 0x0000 }, /* R656 */
{ 0x0000, 0x0000 }, /* R657 */
{ 0x0000, 0x0000 }, /* R658 */
{ 0x0000, 0x0000 }, /* R659 */
{ 0x0000, 0x0000 }, /* R660 */
{ 0x0000, 0x0000 }, /* R661 */
{ 0x0000, 0x0000 }, /* R662 */
{ 0x0000, 0x0000 }, /* R663 */
{ 0x0000, 0x0000 }, /* R664 */
{ 0x0000, 0x0000 }, /* R665 */
{ 0x0000, 0x0000 }, /* R666 */
{ 0x0000, 0x0000 }, /* R667 */
{ 0x0000, 0x0000 }, /* R668 */
{ 0x0000, 0x0000 }, /* R669 */
{ 0x0000, 0x0000 }, /* R670 */
{ 0x0000, 0x0000 }, /* R671 */
{ 0x0000, 0x0000 }, /* R672 */
{ 0x0000, 0x0000 }, /* R673 */
{ 0x0000, 0x0000 }, /* R674 */
{ 0x0000, 0x0000 }, /* R675 */
{ 0x0000, 0x0000 }, /* R676 */
{ 0x0000, 0x0000 }, /* R677 */
{ 0x0000, 0x0000 }, /* R678 */
{ 0x0000, 0x0000 }, /* R679 */
{ 0x0000, 0x0000 }, /* R680 */
{ 0x0000, 0x0000 }, /* R681 */
{ 0x0000, 0x0000 }, /* R682 */
{ 0x0000, 0x0000 }, /* R683 */
{ 0x0000, 0x0000 }, /* R684 */
{ 0x0000, 0x0000 }, /* R685 */
{ 0x0000, 0x0000 }, /* R686 */
{ 0x0000, 0x0000 }, /* R687 */
{ 0x0000, 0x0000 }, /* R688 */
{ 0x0000, 0x0000 }, /* R689 */
{ 0x0000, 0x0000 }, /* R690 */
{ 0x0000, 0x0000 }, /* R691 */
{ 0x0000, 0x0000 }, /* R692 */
{ 0x0000, 0x0000 }, /* R693 */
{ 0x0000, 0x0000 }, /* R694 */
{ 0x0000, 0x0000 }, /* R695 */
{ 0x0000, 0x0000 }, /* R696 */
{ 0x0000, 0x0000 }, /* R697 */
{ 0x0000, 0x0000 }, /* R698 */
{ 0x0000, 0x0000 }, /* R699 */
{ 0x0000, 0x0000 }, /* R700 */
{ 0x0000, 0x0000 }, /* R701 */
{ 0x0000, 0x0000 }, /* R702 */
{ 0x0000, 0x0000 }, /* R703 */
{ 0x0000, 0x0000 }, /* R704 */
{ 0x0000, 0x0000 }, /* R705 */
{ 0x0000, 0x0000 }, /* R706 */
{ 0x0000, 0x0000 }, /* R707 */
{ 0x0000, 0x0000 }, /* R708 */
{ 0x0000, 0x0000 }, /* R709 */
{ 0x0000, 0x0000 }, /* R710 */
{ 0x0000, 0x0000 }, /* R711 */
{ 0x0000, 0x0000 }, /* R712 */
{ 0x0000, 0x0000 }, /* R713 */
{ 0x0000, 0x0000 }, /* R714 */
{ 0x0000, 0x0000 }, /* R715 */
{ 0x0000, 0x0000 }, /* R716 */
{ 0x0000, 0x0000 }, /* R717 */
{ 0x0000, 0x0000 }, /* R718 */
{ 0x0000, 0x0000 }, /* R719 */
{ 0x0000, 0x0000 }, /* R720 */
{ 0x0000, 0x0000 }, /* R721 */
{ 0x0000, 0x0000 }, /* R722 */
{ 0x0000, 0x0000 }, /* R723 */
{ 0x0000, 0x0000 }, /* R724 */
{ 0x0000, 0x0000 }, /* R725 */
{ 0x0000, 0x0000 }, /* R726 */
{ 0x0000, 0x0000 }, /* R727 */
{ 0x0000, 0x0000 }, /* R728 */
{ 0x0000, 0x0000 }, /* R729 */
{ 0x0000, 0x0000 }, /* R730 */
{ 0x0000, 0x0000 }, /* R731 */
{ 0x0000, 0x0000 }, /* R732 */
{ 0x0000, 0x0000 }, /* R733 */
{ 0x0000, 0x0000 }, /* R734 */
{ 0x0000, 0x0000 }, /* R735 */
{ 0x0000, 0x0000 }, /* R736 */
{ 0x0000, 0x0000 }, /* R737 */
{ 0x0000, 0x0000 }, /* R738 */
{ 0x0000, 0x0000 }, /* R739 */
{ 0x0000, 0x0000 }, /* R740 */
{ 0x0000, 0x0000 }, /* R741 */
{ 0x0000, 0x0000 }, /* R742 */
{ 0x0000, 0x0000 }, /* R743 */
{ 0x0000, 0x0000 }, /* R744 */
{ 0x0000, 0x0000 }, /* R745 */
{ 0x0000, 0x0000 }, /* R746 */
{ 0x0000, 0x0000 }, /* R747 */
{ 0x0000, 0x0000 }, /* R748 */
{ 0x0000, 0x0000 }, /* R749 */
{ 0x0000, 0x0000 }, /* R750 */
{ 0x0000, 0x0000 }, /* R751 */
{ 0x0000, 0x0000 }, /* R752 */
{ 0x0000, 0x0000 }, /* R753 */
{ 0x0000, 0x0000 }, /* R754 */
{ 0x0000, 0x0000 }, /* R755 */
{ 0x0000, 0x0000 }, /* R756 */
{ 0x0000, 0x0000 }, /* R757 */
{ 0x0000, 0x0000 }, /* R758 */
{ 0x0000, 0x0000 }, /* R759 */
{ 0x0000, 0x0000 }, /* R760 */
{ 0x0000, 0x0000 }, /* R761 */
{ 0x0000, 0x0000 }, /* R762 */
{ 0x0000, 0x0000 }, /* R763 */
{ 0x0000, 0x0000 }, /* R764 */
{ 0x0000, 0x0000 }, /* R765 */
{ 0x0000, 0x0000 }, /* R766 */
{ 0x0000, 0x0000 }, /* R767 */
{ 0xE1F8, 0xE1F8 }, /* R768 - AIF1 Control (1) */
{ 0xCD1F, 0xCD1F }, /* R769 - AIF1 Control (2) */
{ 0xF000, 0xF000 }, /* R770 - AIF1 Master/Slave */
{ 0x01F0, 0x01F0 }, /* R771 - AIF1 BCLK */
{ 0x0FFF, 0x0FFF }, /* R772 - AIF1ADC LRCLK */
{ 0x0FFF, 0x0FFF }, /* R773 - AIF1DAC LRCLK */
{ 0x0003, 0x0003 }, /* R774 - AIF1DAC Data */
{ 0x0003, 0x0003 }, /* R775 - AIF1ADC Data */
{ 0x0000, 0x0000 }, /* R776 */
{ 0x0000, 0x0000 }, /* R777 */
{ 0x0000, 0x0000 }, /* R778 */
{ 0x0000, 0x0000 }, /* R779 */
{ 0x0000, 0x0000 }, /* R780 */
{ 0x0000, 0x0000 }, /* R781 */
{ 0x0000, 0x0000 }, /* R782 */
{ 0x0000, 0x0000 }, /* R783 */
{ 0xF1F8, 0xF1F8 }, /* R784 - AIF2 Control (1) */
{ 0xFD1F, 0xFD1F }, /* R785 - AIF2 Control (2) */
{ 0xF000, 0xF000 }, /* R786 - AIF2 Master/Slave */
{ 0x01F0, 0x01F0 }, /* R787 - AIF2 BCLK */
{ 0x0FFF, 0x0FFF }, /* R788 - AIF2ADC LRCLK */
{ 0x0FFF, 0x0FFF }, /* R789 - AIF2DAC LRCLK */
{ 0x0003, 0x0003 }, /* R790 - AIF2DAC Data */
{ 0x0003, 0x0003 }, /* R791 - AIF2ADC Data */
{ 0x0000, 0x0000 }, /* R792 */
{ 0x0000, 0x0000 }, /* R793 */
{ 0x0000, 0x0000 }, /* R794 */
{ 0x0000, 0x0000 }, /* R795 */
{ 0x0000, 0x0000 }, /* R796 */
{ 0x0000, 0x0000 }, /* R797 */
{ 0x0000, 0x0000 }, /* R798 */
{ 0x0000, 0x0000 }, /* R799 */
{ 0x0000, 0x0000 }, /* R800 */
{ 0x0000, 0x0000 }, /* R801 */
{ 0x0000, 0x0000 }, /* R802 */
{ 0x0000, 0x0000 }, /* R803 */
{ 0x0000, 0x0000 }, /* R804 */
{ 0x0000, 0x0000 }, /* R805 */
{ 0x0000, 0x0000 }, /* R806 */
{ 0x0000, 0x0000 }, /* R807 */
{ 0x0000, 0x0000 }, /* R808 */
{ 0x0000, 0x0000 }, /* R809 */
{ 0x0000, 0x0000 }, /* R810 */
{ 0x0000, 0x0000 }, /* R811 */
{ 0x0000, 0x0000 }, /* R812 */
{ 0x0000, 0x0000 }, /* R813 */
{ 0x0000, 0x0000 }, /* R814 */
{ 0x0000, 0x0000 }, /* R815 */
{ 0x0000, 0x0000 }, /* R816 */
{ 0x0000, 0x0000 }, /* R817 */
{ 0x0000, 0x0000 }, /* R818 */
{ 0x0000, 0x0000 }, /* R819 */
{ 0x0000, 0x0000 }, /* R820 */
{ 0x0000, 0x0000 }, /* R821 */
{ 0x0000, 0x0000 }, /* R822 */
{ 0x0000, 0x0000 }, /* R823 */
{ 0x0000, 0x0000 }, /* R824 */
{ 0x0000, 0x0000 }, /* R825 */
{ 0x0000, 0x0000 }, /* R826 */
{ 0x0000, 0x0000 }, /* R827 */
{ 0x0000, 0x0000 }, /* R828 */
{ 0x0000, 0x0000 }, /* R829 */
{ 0x0000, 0x0000 }, /* R830 */
{ 0x0000, 0x0000 }, /* R831 */
{ 0x0000, 0x0000 }, /* R832 */
{ 0x0000, 0x0000 }, /* R833 */
{ 0x0000, 0x0000 }, /* R834 */
{ 0x0000, 0x0000 }, /* R835 */
{ 0x0000, 0x0000 }, /* R836 */
{ 0x0000, 0x0000 }, /* R837 */
{ 0x0000, 0x0000 }, /* R838 */
{ 0x0000, 0x0000 }, /* R839 */
{ 0x0000, 0x0000 }, /* R840 */
{ 0x0000, 0x0000 }, /* R841 */
{ 0x0000, 0x0000 }, /* R842 */
{ 0x0000, 0x0000 }, /* R843 */
{ 0x0000, 0x0000 }, /* R844 */
{ 0x0000, 0x0000 }, /* R845 */
{ 0x0000, 0x0000 }, /* R846 */
{ 0x0000, 0x0000 }, /* R847 */
{ 0x0000, 0x0000 }, /* R848 */
{ 0x0000, 0x0000 }, /* R849 */
{ 0x0000, 0x0000 }, /* R850 */
{ 0x0000, 0x0000 }, /* R851 */
{ 0x0000, 0x0000 }, /* R852 */
{ 0x0000, 0x0000 }, /* R853 */
{ 0x0000, 0x0000 }, /* R854 */
{ 0x0000, 0x0000 }, /* R855 */
{ 0x0000, 0x0000 }, /* R856 */
{ 0x0000, 0x0000 }, /* R857 */
{ 0x0000, 0x0000 }, /* R858 */
{ 0x0000, 0x0000 }, /* R859 */
{ 0x0000, 0x0000 }, /* R860 */
{ 0x0000, 0x0000 }, /* R861 */
{ 0x0000, 0x0000 }, /* R862 */
{ 0x0000, 0x0000 }, /* R863 */
{ 0x0000, 0x0000 }, /* R864 */
{ 0x0000, 0x0000 }, /* R865 */
{ 0x0000, 0x0000 }, /* R866 */
{ 0x0000, 0x0000 }, /* R867 */
{ 0x0000, 0x0000 }, /* R868 */
{ 0x0000, 0x0000 }, /* R869 */
{ 0x0000, 0x0000 }, /* R870 */
{ 0x0000, 0x0000 }, /* R871 */
{ 0x0000, 0x0000 }, /* R872 */
{ 0x0000, 0x0000 }, /* R873 */
{ 0x0000, 0x0000 }, /* R874 */
{ 0x0000, 0x0000 }, /* R875 */
{ 0x0000, 0x0000 }, /* R876 */
{ 0x0000, 0x0000 }, /* R877 */
{ 0x0000, 0x0000 }, /* R878 */
{ 0x0000, 0x0000 }, /* R879 */
{ 0x0000, 0x0000 }, /* R880 */
{ 0x0000, 0x0000 }, /* R881 */
{ 0x0000, 0x0000 }, /* R882 */
{ 0x0000, 0x0000 }, /* R883 */
{ 0x0000, 0x0000 }, /* R884 */
{ 0x0000, 0x0000 }, /* R885 */
{ 0x0000, 0x0000 }, /* R886 */
{ 0x0000, 0x0000 }, /* R887 */
{ 0x0000, 0x0000 }, /* R888 */
{ 0x0000, 0x0000 }, /* R889 */
{ 0x0000, 0x0000 }, /* R890 */
{ 0x0000, 0x0000 }, /* R891 */
{ 0x0000, 0x0000 }, /* R892 */
{ 0x0000, 0x0000 }, /* R893 */
{ 0x0000, 0x0000 }, /* R894 */
{ 0x0000, 0x0000 }, /* R895 */
{ 0x0000, 0x0000 }, /* R896 */
{ 0x0000, 0x0000 }, /* R897 */
{ 0x0000, 0x0000 }, /* R898 */
{ 0x0000, 0x0000 }, /* R899 */
{ 0x0000, 0x0000 }, /* R900 */
{ 0x0000, 0x0000 }, /* R901 */
{ 0x0000, 0x0000 }, /* R902 */
{ 0x0000, 0x0000 }, /* R903 */
{ 0x0000, 0x0000 }, /* R904 */
{ 0x0000, 0x0000 }, /* R905 */
{ 0x0000, 0x0000 }, /* R906 */
{ 0x0000, 0x0000 }, /* R907 */
{ 0x0000, 0x0000 }, /* R908 */
{ 0x0000, 0x0000 }, /* R909 */
{ 0x0000, 0x0000 }, /* R910 */
{ 0x0000, 0x0000 }, /* R911 */
{ 0x0000, 0x0000 }, /* R912 */
{ 0x0000, 0x0000 }, /* R913 */
{ 0x0000, 0x0000 }, /* R914 */
{ 0x0000, 0x0000 }, /* R915 */
{ 0x0000, 0x0000 }, /* R916 */
{ 0x0000, 0x0000 }, /* R917 */
{ 0x0000, 0x0000 }, /* R918 */
{ 0x0000, 0x0000 }, /* R919 */
{ 0x0000, 0x0000 }, /* R920 */
{ 0x0000, 0x0000 }, /* R921 */
{ 0x0000, 0x0000 }, /* R922 */
{ 0x0000, 0x0000 }, /* R923 */
{ 0x0000, 0x0000 }, /* R924 */
{ 0x0000, 0x0000 }, /* R925 */
{ 0x0000, 0x0000 }, /* R926 */
{ 0x0000, 0x0000 }, /* R927 */
{ 0x0000, 0x0000 }, /* R928 */
{ 0x0000, 0x0000 }, /* R929 */
{ 0x0000, 0x0000 }, /* R930 */
{ 0x0000, 0x0000 }, /* R931 */
{ 0x0000, 0x0000 }, /* R932 */
{ 0x0000, 0x0000 }, /* R933 */
{ 0x0000, 0x0000 }, /* R934 */
{ 0x0000, 0x0000 }, /* R935 */
{ 0x0000, 0x0000 }, /* R936 */
{ 0x0000, 0x0000 }, /* R937 */
{ 0x0000, 0x0000 }, /* R938 */
{ 0x0000, 0x0000 }, /* R939 */
{ 0x0000, 0x0000 }, /* R940 */
{ 0x0000, 0x0000 }, /* R941 */
{ 0x0000, 0x0000 }, /* R942 */
{ 0x0000, 0x0000 }, /* R943 */
{ 0x0000, 0x0000 }, /* R944 */
{ 0x0000, 0x0000 }, /* R945 */
{ 0x0000, 0x0000 }, /* R946 */
{ 0x0000, 0x0000 }, /* R947 */
{ 0x0000, 0x0000 }, /* R948 */
{ 0x0000, 0x0000 }, /* R949 */
{ 0x0000, 0x0000 }, /* R950 */
{ 0x0000, 0x0000 }, /* R951 */
{ 0x0000, 0x0000 }, /* R952 */
{ 0x0000, 0x0000 }, /* R953 */
{ 0x0000, 0x0000 }, /* R954 */
{ 0x0000, 0x0000 }, /* R955 */
{ 0x0000, 0x0000 }, /* R956 */
{ 0x0000, 0x0000 }, /* R957 */
{ 0x0000, 0x0000 }, /* R958 */
{ 0x0000, 0x0000 }, /* R959 */
{ 0x0000, 0x0000 }, /* R960 */
{ 0x0000, 0x0000 }, /* R961 */
{ 0x0000, 0x0000 }, /* R962 */
{ 0x0000, 0x0000 }, /* R963 */
{ 0x0000, 0x0000 }, /* R964 */
{ 0x0000, 0x0000 }, /* R965 */
{ 0x0000, 0x0000 }, /* R966 */
{ 0x0000, 0x0000 }, /* R967 */
{ 0x0000, 0x0000 }, /* R968 */
{ 0x0000, 0x0000 }, /* R969 */
{ 0x0000, 0x0000 }, /* R970 */
{ 0x0000, 0x0000 }, /* R971 */
{ 0x0000, 0x0000 }, /* R972 */
{ 0x0000, 0x0000 }, /* R973 */
{ 0x0000, 0x0000 }, /* R974 */
{ 0x0000, 0x0000 }, /* R975 */
{ 0x0000, 0x0000 }, /* R976 */
{ 0x0000, 0x0000 }, /* R977 */
{ 0x0000, 0x0000 }, /* R978 */
{ 0x0000, 0x0000 }, /* R979 */
{ 0x0000, 0x0000 }, /* R980 */
{ 0x0000, 0x0000 }, /* R981 */
{ 0x0000, 0x0000 }, /* R982 */
{ 0x0000, 0x0000 }, /* R983 */
{ 0x0000, 0x0000 }, /* R984 */
{ 0x0000, 0x0000 }, /* R985 */
{ 0x0000, 0x0000 }, /* R986 */
{ 0x0000, 0x0000 }, /* R987 */
{ 0x0000, 0x0000 }, /* R988 */
{ 0x0000, 0x0000 }, /* R989 */
{ 0x0000, 0x0000 }, /* R990 */
{ 0x0000, 0x0000 }, /* R991 */
{ 0x0000, 0x0000 }, /* R992 */
{ 0x0000, 0x0000 }, /* R993 */
{ 0x0000, 0x0000 }, /* R994 */
{ 0x0000, 0x0000 }, /* R995 */
{ 0x0000, 0x0000 }, /* R996 */
{ 0x0000, 0x0000 }, /* R997 */
{ 0x0000, 0x0000 }, /* R998 */
{ 0x0000, 0x0000 }, /* R999 */
{ 0x0000, 0x0000 }, /* R1000 */
{ 0x0000, 0x0000 }, /* R1001 */
{ 0x0000, 0x0000 }, /* R1002 */
{ 0x0000, 0x0000 }, /* R1003 */
{ 0x0000, 0x0000 }, /* R1004 */
{ 0x0000, 0x0000 }, /* R1005 */
{ 0x0000, 0x0000 }, /* R1006 */
{ 0x0000, 0x0000 }, /* R1007 */
{ 0x0000, 0x0000 }, /* R1008 */
{ 0x0000, 0x0000 }, /* R1009 */
{ 0x0000, 0x0000 }, /* R1010 */
{ 0x0000, 0x0000 }, /* R1011 */
{ 0x0000, 0x0000 }, /* R1012 */
{ 0x0000, 0x0000 }, /* R1013 */
{ 0x0000, 0x0000 }, /* R1014 */
{ 0x0000, 0x0000 }, /* R1015 */
{ 0x0000, 0x0000 }, /* R1016 */
{ 0x0000, 0x0000 }, /* R1017 */
{ 0x0000, 0x0000 }, /* R1018 */
{ 0x0000, 0x0000 }, /* R1019 */
{ 0x0000, 0x0000 }, /* R1020 */
{ 0x0000, 0x0000 }, /* R1021 */
{ 0x0000, 0x0000 }, /* R1022 */
{ 0x0000, 0x0000 }, /* R1023 */
{ 0x00FF, 0x01FF }, /* R1024 - AIF1 ADC1 Left Volume */
{ 0x00FF, 0x01FF }, /* R1025 - AIF1 ADC1 Right Volume */
{ 0x00FF, 0x01FF }, /* R1026 - AIF1 DAC1 Left Volume */
{ 0x00FF, 0x01FF }, /* R1027 - AIF1 DAC1 Right Volume */
{ 0x00FF, 0x01FF }, /* R1028 - AIF1 ADC2 Left Volume */
{ 0x00FF, 0x01FF }, /* R1029 - AIF1 ADC2 Right Volume */
{ 0x00FF, 0x01FF }, /* R1030 - AIF1 DAC2 Left Volume */
{ 0x00FF, 0x01FF }, /* R1031 - AIF1 DAC2 Right Volume */
{ 0x0000, 0x0000 }, /* R1032 */
{ 0x0000, 0x0000 }, /* R1033 */
{ 0x0000, 0x0000 }, /* R1034 */
{ 0x0000, 0x0000 }, /* R1035 */
{ 0x0000, 0x0000 }, /* R1036 */
{ 0x0000, 0x0000 }, /* R1037 */
{ 0x0000, 0x0000 }, /* R1038 */
{ 0x0000, 0x0000 }, /* R1039 */
{ 0xF800, 0xF800 }, /* R1040 - AIF1 ADC1 Filters */
{ 0x7800, 0x7800 }, /* R1041 - AIF1 ADC2 Filters */
{ 0x0000, 0x0000 }, /* R1042 */
{ 0x0000, 0x0000 }, /* R1043 */
{ 0x0000, 0x0000 }, /* R1044 */
{ 0x0000, 0x0000 }, /* R1045 */
{ 0x0000, 0x0000 }, /* R1046 */
{ 0x0000, 0x0000 }, /* R1047 */
{ 0x0000, 0x0000 }, /* R1048 */
{ 0x0000, 0x0000 }, /* R1049 */
{ 0x0000, 0x0000 }, /* R1050 */
{ 0x0000, 0x0000 }, /* R1051 */
{ 0x0000, 0x0000 }, /* R1052 */
{ 0x0000, 0x0000 }, /* R1053 */
{ 0x0000, 0x0000 }, /* R1054 */
{ 0x0000, 0x0000 }, /* R1055 */
{ 0x02B6, 0x02B6 }, /* R1056 - AIF1 DAC1 Filters (1) */
{ 0x3F00, 0x3F00 }, /* R1057 - AIF1 DAC1 Filters (2) */
{ 0x02B6, 0x02B6 }, /* R1058 - AIF1 DAC2 Filters (1) */
{ 0x3F00, 0x3F00 }, /* R1059 - AIF1 DAC2 Filters (2) */
{ 0x0000, 0x0000 }, /* R1060 */
{ 0x0000, 0x0000 }, /* R1061 */
{ 0x0000, 0x0000 }, /* R1062 */
{ 0x0000, 0x0000 }, /* R1063 */
{ 0x0000, 0x0000 }, /* R1064 */
{ 0x0000, 0x0000 }, /* R1065 */
{ 0x0000, 0x0000 }, /* R1066 */
{ 0x0000, 0x0000 }, /* R1067 */
{ 0x0000, 0x0000 }, /* R1068 */
{ 0x0000, 0x0000 }, /* R1069 */
{ 0x0000, 0x0000 }, /* R1070 */
{ 0x0000, 0x0000 }, /* R1071 */
{ 0x0000, 0x0000 }, /* R1072 */
{ 0x0000, 0x0000 }, /* R1073 */
{ 0x0000, 0x0000 }, /* R1074 */
{ 0x0000, 0x0000 }, /* R1075 */
{ 0x0000, 0x0000 }, /* R1076 */
{ 0x0000, 0x0000 }, /* R1077 */
{ 0x0000, 0x0000 }, /* R1078 */
{ 0x0000, 0x0000 }, /* R1079 */
{ 0x0000, 0x0000 }, /* R1080 */
{ 0x0000, 0x0000 }, /* R1081 */
{ 0x0000, 0x0000 }, /* R1082 */
{ 0x0000, 0x0000 }, /* R1083 */
{ 0x0000, 0x0000 }, /* R1084 */
{ 0x0000, 0x0000 }, /* R1085 */
{ 0x0000, 0x0000 }, /* R1086 */
{ 0x0000, 0x0000 }, /* R1087 */
{ 0xFFFF, 0xFFFF }, /* R1088 - AIF1 DRC1 (1) */
{ 0x1FFF, 0x1FFF }, /* R1089 - AIF1 DRC1 (2) */
{ 0xFFFF, 0xFFFF }, /* R1090 - AIF1 DRC1 (3) */
{ 0x07FF, 0x07FF }, /* R1091 - AIF1 DRC1 (4) */
{ 0x03FF, 0x03FF }, /* R1092 - AIF1 DRC1 (5) */
{ 0x0000, 0x0000 }, /* R1093 */
{ 0x0000, 0x0000 }, /* R1094 */
{ 0x0000, 0x0000 }, /* R1095 */
{ 0x0000, 0x0000 }, /* R1096 */
{ 0x0000, 0x0000 }, /* R1097 */
{ 0x0000, 0x0000 }, /* R1098 */
{ 0x0000, 0x0000 }, /* R1099 */
{ 0x0000, 0x0000 }, /* R1100 */
{ 0x0000, 0x0000 }, /* R1101 */
{ 0x0000, 0x0000 }, /* R1102 */
{ 0x0000, 0x0000 }, /* R1103 */
{ 0xFFFF, 0xFFFF }, /* R1104 - AIF1 DRC2 (1) */
{ 0x1FFF, 0x1FFF }, /* R1105 - AIF1 DRC2 (2) */
{ 0xFFFF, 0xFFFF }, /* R1106 - AIF1 DRC2 (3) */
{ 0x07FF, 0x07FF }, /* R1107 - AIF1 DRC2 (4) */
{ 0x03FF, 0x03FF }, /* R1108 - AIF1 DRC2 (5) */
{ 0x0000, 0x0000 }, /* R1109 */
{ 0x0000, 0x0000 }, /* R1110 */
{ 0x0000, 0x0000 }, /* R1111 */
{ 0x0000, 0x0000 }, /* R1112 */
{ 0x0000, 0x0000 }, /* R1113 */
{ 0x0000, 0x0000 }, /* R1114 */
{ 0x0000, 0x0000 }, /* R1115 */
{ 0x0000, 0x0000 }, /* R1116 */
{ 0x0000, 0x0000 }, /* R1117 */
{ 0x0000, 0x0000 }, /* R1118 */
{ 0x0000, 0x0000 }, /* R1119 */
{ 0x0000, 0x0000 }, /* R1120 */
{ 0x0000, 0x0000 }, /* R1121 */
{ 0x0000, 0x0000 }, /* R1122 */
{ 0x0000, 0x0000 }, /* R1123 */
{ 0x0000, 0x0000 }, /* R1124 */
{ 0x0000, 0x0000 }, /* R1125 */
{ 0x0000, 0x0000 }, /* R1126 */
{ 0x0000, 0x0000 }, /* R1127 */
{ 0x0000, 0x0000 }, /* R1128 */
{ 0x0000, 0x0000 }, /* R1129 */
{ 0x0000, 0x0000 }, /* R1130 */
{ 0x0000, 0x0000 }, /* R1131 */
{ 0x0000, 0x0000 }, /* R1132 */
{ 0x0000, 0x0000 }, /* R1133 */
{ 0x0000, 0x0000 }, /* R1134 */
{ 0x0000, 0x0000 }, /* R1135 */
{ 0x0000, 0x0000 }, /* R1136 */
{ 0x0000, 0x0000 }, /* R1137 */
{ 0x0000, 0x0000 }, /* R1138 */
{ 0x0000, 0x0000 }, /* R1139 */
{ 0x0000, 0x0000 }, /* R1140 */
{ 0x0000, 0x0000 }, /* R1141 */
{ 0x0000, 0x0000 }, /* R1142 */
{ 0x0000, 0x0000 }, /* R1143 */
{ 0x0000, 0x0000 }, /* R1144 */
{ 0x0000, 0x0000 }, /* R1145 */
{ 0x0000, 0x0000 }, /* R1146 */
{ 0x0000, 0x0000 }, /* R1147 */
{ 0x0000, 0x0000 }, /* R1148 */
{ 0x0000, 0x0000 }, /* R1149 */
{ 0x0000, 0x0000 }, /* R1150 */
{ 0x0000, 0x0000 }, /* R1151 */
{ 0xFFFF, 0xFFFF }, /* R1152 - AIF1 DAC1 EQ Gains (1) */
{ 0xFFC0, 0xFFC0 }, /* R1153 - AIF1 DAC1 EQ Gains (2) */
{ 0xFFFF, 0xFFFF }, /* R1154 - AIF1 DAC1 EQ Band 1 A */
{ 0xFFFF, 0xFFFF }, /* R1155 - AIF1 DAC1 EQ Band 1 B */
{ 0xFFFF, 0xFFFF }, /* R1156 - AIF1 DAC1 EQ Band 1 PG */
{ 0xFFFF, 0xFFFF }, /* R1157 - AIF1 DAC1 EQ Band 2 A */
{ 0xFFFF, 0xFFFF }, /* R1158 - AIF1 DAC1 EQ Band 2 B */
{ 0xFFFF, 0xFFFF }, /* R1159 - AIF1 DAC1 EQ Band 2 C */
{ 0xFFFF, 0xFFFF }, /* R1160 - AIF1 DAC1 EQ Band 2 PG */
{ 0xFFFF, 0xFFFF }, /* R1161 - AIF1 DAC1 EQ Band 3 A */
{ 0xFFFF, 0xFFFF }, /* R1162 - AIF1 DAC1 EQ Band 3 B */
{ 0xFFFF, 0xFFFF }, /* R1163 - AIF1 DAC1 EQ Band 3 C */
{ 0xFFFF, 0xFFFF }, /* R1164 - AIF1 DAC1 EQ Band 3 PG */
{ 0xFFFF, 0xFFFF }, /* R1165 - AIF1 DAC1 EQ Band 4 A */
{ 0xFFFF, 0xFFFF }, /* R1166 - AIF1 DAC1 EQ Band 4 B */
{ 0xFFFF, 0xFFFF }, /* R1167 - AIF1 DAC1 EQ Band 4 C */
{ 0xFFFF, 0xFFFF }, /* R1168 - AIF1 DAC1 EQ Band 4 PG */
{ 0xFFFF, 0xFFFF }, /* R1169 - AIF1 DAC1 EQ Band 5 A */
{ 0xFFFF, 0xFFFF }, /* R1170 - AIF1 DAC1 EQ Band 5 B */
{ 0xFFFF, 0xFFFF }, /* R1171 - AIF1 DAC1 EQ Band 5 PG */
{ 0x0000, 0x0000 }, /* R1172 */
{ 0x0000, 0x0000 }, /* R1173 */
{ 0x0000, 0x0000 }, /* R1174 */
{ 0x0000, 0x0000 }, /* R1175 */
{ 0x0000, 0x0000 }, /* R1176 */
{ 0x0000, 0x0000 }, /* R1177 */
{ 0x0000, 0x0000 }, /* R1178 */
{ 0x0000, 0x0000 }, /* R1179 */
{ 0x0000, 0x0000 }, /* R1180 */
{ 0x0000, 0x0000 }, /* R1181 */
{ 0x0000, 0x0000 }, /* R1182 */
{ 0x0000, 0x0000 }, /* R1183 */
{ 0xFFFF, 0xFFFF }, /* R1184 - AIF1 DAC2 EQ Gains (1) */
{ 0xFFC0, 0xFFC0 }, /* R1185 - AIF1 DAC2 EQ Gains (2) */
{ 0xFFFF, 0xFFFF }, /* R1186 - AIF1 DAC2 EQ Band 1 A */
{ 0xFFFF, 0xFFFF }, /* R1187 - AIF1 DAC2 EQ Band 1 B */
{ 0xFFFF, 0xFFFF }, /* R1188 - AIF1 DAC2 EQ Band 1 PG */
{ 0xFFFF, 0xFFFF }, /* R1189 - AIF1 DAC2 EQ Band 2 A */
{ 0xFFFF, 0xFFFF }, /* R1190 - AIF1 DAC2 EQ Band 2 B */
{ 0xFFFF, 0xFFFF }, /* R1191 - AIF1 DAC2 EQ Band 2 C */
{ 0xFFFF, 0xFFFF }, /* R1192 - AIF1 DAC2 EQ Band 2 PG */
{ 0xFFFF, 0xFFFF }, /* R1193 - AIF1 DAC2 EQ Band 3 A */
{ 0xFFFF, 0xFFFF }, /* R1194 - AIF1 DAC2 EQ Band 3 B */
{ 0xFFFF, 0xFFFF }, /* R1195 - AIF1 DAC2 EQ Band 3 C */
{ 0xFFFF, 0xFFFF }, /* R1196 - AIF1 DAC2 EQ Band 3 PG */
{ 0xFFFF, 0xFFFF }, /* R1197 - AIF1 DAC2 EQ Band 4 A */
{ 0xFFFF, 0xFFFF }, /* R1198 - AIF1 DAC2 EQ Band 4 B */
{ 0xFFFF, 0xFFFF }, /* R1199 - AIF1 DAC2 EQ Band 4 C */
{ 0xFFFF, 0xFFFF }, /* R1200 - AIF1 DAC2 EQ Band 4 PG */
{ 0xFFFF, 0xFFFF }, /* R1201 - AIF1 DAC2 EQ Band 5 A */
{ 0xFFFF, 0xFFFF }, /* R1202 - AIF1 DAC2 EQ Band 5 B */
{ 0xFFFF, 0xFFFF }, /* R1203 - AIF1 DAC2 EQ Band 5 PG */
{ 0x0000, 0x0000 }, /* R1204 */
{ 0x0000, 0x0000 }, /* R1205 */
{ 0x0000, 0x0000 }, /* R1206 */
{ 0x0000, 0x0000 }, /* R1207 */
{ 0x0000, 0x0000 }, /* R1208 */
{ 0x0000, 0x0000 }, /* R1209 */
{ 0x0000, 0x0000 }, /* R1210 */
{ 0x0000, 0x0000 }, /* R1211 */
{ 0x0000, 0x0000 }, /* R1212 */
{ 0x0000, 0x0000 }, /* R1213 */
{ 0x0000, 0x0000 }, /* R1214 */
{ 0x0000, 0x0000 }, /* R1215 */
{ 0x0000, 0x0000 }, /* R1216 */
{ 0x0000, 0x0000 }, /* R1217 */
{ 0x0000, 0x0000 }, /* R1218 */
{ 0x0000, 0x0000 }, /* R1219 */
{ 0x0000, 0x0000 }, /* R1220 */
{ 0x0000, 0x0000 }, /* R1221 */
{ 0x0000, 0x0000 }, /* R1222 */
{ 0x0000, 0x0000 }, /* R1223 */
{ 0x0000, 0x0000 }, /* R1224 */
{ 0x0000, 0x0000 }, /* R1225 */
{ 0x0000, 0x0000 }, /* R1226 */
{ 0x0000, 0x0000 }, /* R1227 */
{ 0x0000, 0x0000 }, /* R1228 */
{ 0x0000, 0x0000 }, /* R1229 */
{ 0x0000, 0x0000 }, /* R1230 */
{ 0x0000, 0x0000 }, /* R1231 */
{ 0x0000, 0x0000 }, /* R1232 */
{ 0x0000, 0x0000 }, /* R1233 */
{ 0x0000, 0x0000 }, /* R1234 */
{ 0x0000, 0x0000 }, /* R1235 */
{ 0x0000, 0x0000 }, /* R1236 */
{ 0x0000, 0x0000 }, /* R1237 */
{ 0x0000, 0x0000 }, /* R1238 */
{ 0x0000, 0x0000 }, /* R1239 */
{ 0x0000, 0x0000 }, /* R1240 */
{ 0x0000, 0x0000 }, /* R1241 */
{ 0x0000, 0x0000 }, /* R1242 */
{ 0x0000, 0x0000 }, /* R1243 */
{ 0x0000, 0x0000 }, /* R1244 */
{ 0x0000, 0x0000 }, /* R1245 */
{ 0x0000, 0x0000 }, /* R1246 */
{ 0x0000, 0x0000 }, /* R1247 */
{ 0x0000, 0x0000 }, /* R1248 */
{ 0x0000, 0x0000 }, /* R1249 */
{ 0x0000, 0x0000 }, /* R1250 */
{ 0x0000, 0x0000 }, /* R1251 */
{ 0x0000, 0x0000 }, /* R1252 */
{ 0x0000, 0x0000 }, /* R1253 */
{ 0x0000, 0x0000 }, /* R1254 */
{ 0x0000, 0x0000 }, /* R1255 */
{ 0x0000, 0x0000 }, /* R1256 */
{ 0x0000, 0x0000 }, /* R1257 */
{ 0x0000, 0x0000 }, /* R1258 */
{ 0x0000, 0x0000 }, /* R1259 */
{ 0x0000, 0x0000 }, /* R1260 */
{ 0x0000, 0x0000 }, /* R1261 */
{ 0x0000, 0x0000 }, /* R1262 */
{ 0x0000, 0x0000 }, /* R1263 */
{ 0x0000, 0x0000 }, /* R1264 */
{ 0x0000, 0x0000 }, /* R1265 */
{ 0x0000, 0x0000 }, /* R1266 */
{ 0x0000, 0x0000 }, /* R1267 */
{ 0x0000, 0x0000 }, /* R1268 */
{ 0x0000, 0x0000 }, /* R1269 */
{ 0x0000, 0x0000 }, /* R1270 */
{ 0x0000, 0x0000 }, /* R1271 */
{ 0x0000, 0x0000 }, /* R1272 */
{ 0x0000, 0x0000 }, /* R1273 */
{ 0x0000, 0x0000 }, /* R1274 */
{ 0x0000, 0x0000 }, /* R1275 */
{ 0x0000, 0x0000 }, /* R1276 */
{ 0x0000, 0x0000 }, /* R1277 */
{ 0x0000, 0x0000 }, /* R1278 */
{ 0x0000, 0x0000 }, /* R1279 */
{ 0x00FF, 0x01FF }, /* R1280 - AIF2 ADC Left Volume */
{ 0x00FF, 0x01FF }, /* R1281 - AIF2 ADC Right Volume */
{ 0x00FF, 0x01FF }, /* R1282 - AIF2 DAC Left Volume */
{ 0x00FF, 0x01FF }, /* R1283 - AIF2 DAC Right Volume */
{ 0x0000, 0x0000 }, /* R1284 */
{ 0x0000, 0x0000 }, /* R1285 */
{ 0x0000, 0x0000 }, /* R1286 */
{ 0x0000, 0x0000 }, /* R1287 */
{ 0x0000, 0x0000 }, /* R1288 */
{ 0x0000, 0x0000 }, /* R1289 */
{ 0x0000, 0x0000 }, /* R1290 */
{ 0x0000, 0x0000 }, /* R1291 */
{ 0x0000, 0x0000 }, /* R1292 */
{ 0x0000, 0x0000 }, /* R1293 */
{ 0x0000, 0x0000 }, /* R1294 */
{ 0x0000, 0x0000 }, /* R1295 */
{ 0xF800, 0xF800 }, /* R1296 - AIF2 ADC Filters */
{ 0x0000, 0x0000 }, /* R1297 */
{ 0x0000, 0x0000 }, /* R1298 */
{ 0x0000, 0x0000 }, /* R1299 */
{ 0x0000, 0x0000 }, /* R1300 */
{ 0x0000, 0x0000 }, /* R1301 */
{ 0x0000, 0x0000 }, /* R1302 */
{ 0x0000, 0x0000 }, /* R1303 */
{ 0x0000, 0x0000 }, /* R1304 */
{ 0x0000, 0x0000 }, /* R1305 */
{ 0x0000, 0x0000 }, /* R1306 */
{ 0x0000, 0x0000 }, /* R1307 */
{ 0x0000, 0x0000 }, /* R1308 */
{ 0x0000, 0x0000 }, /* R1309 */
{ 0x0000, 0x0000 }, /* R1310 */
{ 0x0000, 0x0000 }, /* R1311 */
{ 0x02B6, 0x02B6 }, /* R1312 - AIF2 DAC Filters (1) */
{ 0x3F00, 0x3F00 }, /* R1313 - AIF2 DAC Filters (2) */
{ 0x0000, 0x0000 }, /* R1314 */
{ 0x0000, 0x0000 }, /* R1315 */
{ 0x0000, 0x0000 }, /* R1316 */
{ 0x0000, 0x0000 }, /* R1317 */
{ 0x0000, 0x0000 }, /* R1318 */
{ 0x0000, 0x0000 }, /* R1319 */
{ 0x0000, 0x0000 }, /* R1320 */
{ 0x0000, 0x0000 }, /* R1321 */
{ 0x0000, 0x0000 }, /* R1322 */
{ 0x0000, 0x0000 }, /* R1323 */
{ 0x0000, 0x0000 }, /* R1324 */
{ 0x0000, 0x0000 }, /* R1325 */
{ 0x0000, 0x0000 }, /* R1326 */
{ 0x0000, 0x0000 }, /* R1327 */
{ 0x0000, 0x0000 }, /* R1328 */
{ 0x0000, 0x0000 }, /* R1329 */
{ 0x0000, 0x0000 }, /* R1330 */
{ 0x0000, 0x0000 }, /* R1331 */
{ 0x0000, 0x0000 }, /* R1332 */
{ 0x0000, 0x0000 }, /* R1333 */
{ 0x0000, 0x0000 }, /* R1334 */
{ 0x0000, 0x0000 }, /* R1335 */
{ 0x0000, 0x0000 }, /* R1336 */
{ 0x0000, 0x0000 }, /* R1337 */
{ 0x0000, 0x0000 }, /* R1338 */
{ 0x0000, 0x0000 }, /* R1339 */
{ 0x0000, 0x0000 }, /* R1340 */
{ 0x0000, 0x0000 }, /* R1341 */
{ 0x0000, 0x0000 }, /* R1342 */
{ 0x0000, 0x0000 }, /* R1343 */
{ 0xFFFF, 0xFFFF }, /* R1344 - AIF2 DRC (1) */
{ 0x1FFF, 0x1FFF }, /* R1345 - AIF2 DRC (2) */
{ 0xFFFF, 0xFFFF }, /* R1346 - AIF2 DRC (3) */
{ 0x07FF, 0x07FF }, /* R1347 - AIF2 DRC (4) */
{ 0x03FF, 0x03FF }, /* R1348 - AIF2 DRC (5) */
{ 0x0000, 0x0000 }, /* R1349 */
{ 0x0000, 0x0000 }, /* R1350 */
{ 0x0000, 0x0000 }, /* R1351 */
{ 0x0000, 0x0000 }, /* R1352 */
{ 0x0000, 0x0000 }, /* R1353 */
{ 0x0000, 0x0000 }, /* R1354 */
{ 0x0000, 0x0000 }, /* R1355 */
{ 0x0000, 0x0000 }, /* R1356 */
{ 0x0000, 0x0000 }, /* R1357 */
{ 0x0000, 0x0000 }, /* R1358 */
{ 0x0000, 0x0000 }, /* R1359 */
{ 0x0000, 0x0000 }, /* R1360 */
{ 0x0000, 0x0000 }, /* R1361 */
{ 0x0000, 0x0000 }, /* R1362 */
{ 0x0000, 0x0000 }, /* R1363 */
{ 0x0000, 0x0000 }, /* R1364 */
{ 0x0000, 0x0000 }, /* R1365 */
{ 0x0000, 0x0000 }, /* R1366 */
{ 0x0000, 0x0000 }, /* R1367 */
{ 0x0000, 0x0000 }, /* R1368 */
{ 0x0000, 0x0000 }, /* R1369 */
{ 0x0000, 0x0000 }, /* R1370 */
{ 0x0000, 0x0000 }, /* R1371 */
{ 0x0000, 0x0000 }, /* R1372 */
{ 0x0000, 0x0000 }, /* R1373 */
{ 0x0000, 0x0000 }, /* R1374 */
{ 0x0000, 0x0000 }, /* R1375 */
{ 0x0000, 0x0000 }, /* R1376 */
{ 0x0000, 0x0000 }, /* R1377 */
{ 0x0000, 0x0000 }, /* R1378 */
{ 0x0000, 0x0000 }, /* R1379 */
{ 0x0000, 0x0000 }, /* R1380 */
{ 0x0000, 0x0000 }, /* R1381 */
{ 0x0000, 0x0000 }, /* R1382 */
{ 0x0000, 0x0000 }, /* R1383 */
{ 0x0000, 0x0000 }, /* R1384 */
{ 0x0000, 0x0000 }, /* R1385 */
{ 0x0000, 0x0000 }, /* R1386 */
{ 0x0000, 0x0000 }, /* R1387 */
{ 0x0000, 0x0000 }, /* R1388 */
{ 0x0000, 0x0000 }, /* R1389 */
{ 0x0000, 0x0000 }, /* R1390 */
{ 0x0000, 0x0000 }, /* R1391 */
{ 0x0000, 0x0000 }, /* R1392 */
{ 0x0000, 0x0000 }, /* R1393 */
{ 0x0000, 0x0000 }, /* R1394 */
{ 0x0000, 0x0000 }, /* R1395 */
{ 0x0000, 0x0000 }, /* R1396 */
{ 0x0000, 0x0000 }, /* R1397 */
{ 0x0000, 0x0000 }, /* R1398 */
{ 0x0000, 0x0000 }, /* R1399 */
{ 0x0000, 0x0000 }, /* R1400 */
{ 0x0000, 0x0000 }, /* R1401 */
{ 0x0000, 0x0000 }, /* R1402 */
{ 0x0000, 0x0000 }, /* R1403 */
{ 0x0000, 0x0000 }, /* R1404 */
{ 0x0000, 0x0000 }, /* R1405 */
{ 0x0000, 0x0000 }, /* R1406 */
{ 0x0000, 0x0000 }, /* R1407 */
{ 0xFFFF, 0xFFFF }, /* R1408 - AIF2 EQ Gains (1) */
{ 0xFFC0, 0xFFC0 }, /* R1409 - AIF2 EQ Gains (2) */
{ 0xFFFF, 0xFFFF }, /* R1410 - AIF2 EQ Band 1 A */
{ 0xFFFF, 0xFFFF }, /* R1411 - AIF2 EQ Band 1 B */
{ 0xFFFF, 0xFFFF }, /* R1412 - AIF2 EQ Band 1 PG */
{ 0xFFFF, 0xFFFF }, /* R1413 - AIF2 EQ Band 2 A */
{ 0xFFFF, 0xFFFF }, /* R1414 - AIF2 EQ Band 2 B */
{ 0xFFFF, 0xFFFF }, /* R1415 - AIF2 EQ Band 2 C */
{ 0xFFFF, 0xFFFF }, /* R1416 - AIF2 EQ Band 2 PG */
{ 0xFFFF, 0xFFFF }, /* R1417 - AIF2 EQ Band 3 A */
{ 0xFFFF, 0xFFFF }, /* R1418 - AIF2 EQ Band 3 B */
{ 0xFFFF, 0xFFFF }, /* R1419 - AIF2 EQ Band 3 C */
{ 0xFFFF, 0xFFFF }, /* R1420 - AIF2 EQ Band 3 PG */
{ 0xFFFF, 0xFFFF }, /* R1421 - AIF2 EQ Band 4 A */
{ 0xFFFF, 0xFFFF }, /* R1422 - AIF2 EQ Band 4 B */
{ 0xFFFF, 0xFFFF }, /* R1423 - AIF2 EQ Band 4 C */
{ 0xFFFF, 0xFFFF }, /* R1424 - AIF2 EQ Band 4 PG */
{ 0xFFFF, 0xFFFF }, /* R1425 - AIF2 EQ Band 5 A */
{ 0xFFFF, 0xFFFF }, /* R1426 - AIF2 EQ Band 5 B */
{ 0xFFFF, 0xFFFF }, /* R1427 - AIF2 EQ Band 5 PG */
{ 0x0000, 0x0000 }, /* R1428 */
{ 0x0000, 0x0000 }, /* R1429 */
{ 0x0000, 0x0000 }, /* R1430 */
{ 0x0000, 0x0000 }, /* R1431 */
{ 0x0000, 0x0000 }, /* R1432 */
{ 0x0000, 0x0000 }, /* R1433 */
{ 0x0000, 0x0000 }, /* R1434 */
{ 0x0000, 0x0000 }, /* R1435 */
{ 0x0000, 0x0000 }, /* R1436 */
{ 0x0000, 0x0000 }, /* R1437 */
{ 0x0000, 0x0000 }, /* R1438 */
{ 0x0000, 0x0000 }, /* R1439 */
{ 0x0000, 0x0000 }, /* R1440 */
{ 0x0000, 0x0000 }, /* R1441 */
{ 0x0000, 0x0000 }, /* R1442 */
{ 0x0000, 0x0000 }, /* R1443 */
{ 0x0000, 0x0000 }, /* R1444 */
{ 0x0000, 0x0000 }, /* R1445 */
{ 0x0000, 0x0000 }, /* R1446 */
{ 0x0000, 0x0000 }, /* R1447 */
{ 0x0000, 0x0000 }, /* R1448 */
{ 0x0000, 0x0000 }, /* R1449 */
{ 0x0000, 0x0000 }, /* R1450 */
{ 0x0000, 0x0000 }, /* R1451 */
{ 0x0000, 0x0000 }, /* R1452 */
{ 0x0000, 0x0000 }, /* R1453 */
{ 0x0000, 0x0000 }, /* R1454 */
{ 0x0000, 0x0000 }, /* R1455 */
{ 0x0000, 0x0000 }, /* R1456 */
{ 0x0000, 0x0000 }, /* R1457 */
{ 0x0000, 0x0000 }, /* R1458 */
{ 0x0000, 0x0000 }, /* R1459 */
{ 0x0000, 0x0000 }, /* R1460 */
{ 0x0000, 0x0000 }, /* R1461 */
{ 0x0000, 0x0000 }, /* R1462 */
{ 0x0000, 0x0000 }, /* R1463 */
{ 0x0000, 0x0000 }, /* R1464 */
{ 0x0000, 0x0000 }, /* R1465 */
{ 0x0000, 0x0000 }, /* R1466 */
{ 0x0000, 0x0000 }, /* R1467 */
{ 0x0000, 0x0000 }, /* R1468 */
{ 0x0000, 0x0000 }, /* R1469 */
{ 0x0000, 0x0000 }, /* R1470 */
{ 0x0000, 0x0000 }, /* R1471 */
{ 0x0000, 0x0000 }, /* R1472 */
{ 0x0000, 0x0000 }, /* R1473 */
{ 0x0000, 0x0000 }, /* R1474 */
{ 0x0000, 0x0000 }, /* R1475 */
{ 0x0000, 0x0000 }, /* R1476 */
{ 0x0000, 0x0000 }, /* R1477 */
{ 0x0000, 0x0000 }, /* R1478 */
{ 0x0000, 0x0000 }, /* R1479 */
{ 0x0000, 0x0000 }, /* R1480 */
{ 0x0000, 0x0000 }, /* R1481 */
{ 0x0000, 0x0000 }, /* R1482 */
{ 0x0000, 0x0000 }, /* R1483 */
{ 0x0000, 0x0000 }, /* R1484 */
{ 0x0000, 0x0000 }, /* R1485 */
{ 0x0000, 0x0000 }, /* R1486 */
{ 0x0000, 0x0000 }, /* R1487 */
{ 0x0000, 0x0000 }, /* R1488 */
{ 0x0000, 0x0000 }, /* R1489 */
{ 0x0000, 0x0000 }, /* R1490 */
{ 0x0000, 0x0000 }, /* R1491 */
{ 0x0000, 0x0000 }, /* R1492 */
{ 0x0000, 0x0000 }, /* R1493 */
{ 0x0000, 0x0000 }, /* R1494 */
{ 0x0000, 0x0000 }, /* R1495 */
{ 0x0000, 0x0000 }, /* R1496 */
{ 0x0000, 0x0000 }, /* R1497 */
{ 0x0000, 0x0000 }, /* R1498 */
{ 0x0000, 0x0000 }, /* R1499 */
{ 0x0000, 0x0000 }, /* R1500 */
{ 0x0000, 0x0000 }, /* R1501 */
{ 0x0000, 0x0000 }, /* R1502 */
{ 0x0000, 0x0000 }, /* R1503 */
{ 0x0000, 0x0000 }, /* R1504 */
{ 0x0000, 0x0000 }, /* R1505 */
{ 0x0000, 0x0000 }, /* R1506 */
{ 0x0000, 0x0000 }, /* R1507 */
{ 0x0000, 0x0000 }, /* R1508 */
{ 0x0000, 0x0000 }, /* R1509 */
{ 0x0000, 0x0000 }, /* R1510 */
{ 0x0000, 0x0000 }, /* R1511 */
{ 0x0000, 0x0000 }, /* R1512 */
{ 0x0000, 0x0000 }, /* R1513 */
{ 0x0000, 0x0000 }, /* R1514 */
{ 0x0000, 0x0000 }, /* R1515 */
{ 0x0000, 0x0000 }, /* R1516 */
{ 0x0000, 0x0000 }, /* R1517 */
{ 0x0000, 0x0000 }, /* R1518 */
{ 0x0000, 0x0000 }, /* R1519 */
{ 0x0000, 0x0000 }, /* R1520 */
{ 0x0000, 0x0000 }, /* R1521 */
{ 0x0000, 0x0000 }, /* R1522 */
{ 0x0000, 0x0000 }, /* R1523 */
{ 0x0000, 0x0000 }, /* R1524 */
{ 0x0000, 0x0000 }, /* R1525 */
{ 0x0000, 0x0000 }, /* R1526 */
{ 0x0000, 0x0000 }, /* R1527 */
{ 0x0000, 0x0000 }, /* R1528 */
{ 0x0000, 0x0000 }, /* R1529 */
{ 0x0000, 0x0000 }, /* R1530 */
{ 0x0000, 0x0000 }, /* R1531 */
{ 0x0000, 0x0000 }, /* R1532 */
{ 0x0000, 0x0000 }, /* R1533 */
{ 0x0000, 0x0000 }, /* R1534 */
{ 0x0000, 0x0000 }, /* R1535 */
{ 0x01EF, 0x01EF }, /* R1536 - DAC1 Mixer Volumes */
{ 0x0037, 0x0037 }, /* R1537 - DAC1 Left Mixer Routing */
{ 0x0037, 0x0037 }, /* R1538 - DAC1 Right Mixer Routing */
{ 0x01EF, 0x01EF }, /* R1539 - DAC2 Mixer Volumes */
{ 0x0037, 0x0037 }, /* R1540 - DAC2 Left Mixer Routing */
{ 0x0037, 0x0037 }, /* R1541 - DAC2 Right Mixer Routing */
{ 0x0003, 0x0003 }, /* R1542 - AIF1 ADC1 Left Mixer Routing */
{ 0x0003, 0x0003 }, /* R1543 - AIF1 ADC1 Right Mixer Routing */
{ 0x0003, 0x0003 }, /* R1544 - AIF1 ADC2 Left Mixer Routing */
{ 0x0003, 0x0003 }, /* R1545 - AIF1 ADC2 Right mixer Routing */
{ 0x0000, 0x0000 }, /* R1546 */
{ 0x0000, 0x0000 }, /* R1547 */
{ 0x0000, 0x0000 }, /* R1548 */
{ 0x0000, 0x0000 }, /* R1549 */
{ 0x0000, 0x0000 }, /* R1550 */
{ 0x0000, 0x0000 }, /* R1551 */
{ 0x02FF, 0x03FF }, /* R1552 - DAC1 Left Volume */
{ 0x02FF, 0x03FF }, /* R1553 - DAC1 Right Volume */
{ 0x02FF, 0x03FF }, /* R1554 - DAC2 Left Volume */
{ 0x02FF, 0x03FF }, /* R1555 - DAC2 Right Volume */
{ 0x0003, 0x0003 }, /* R1556 - DAC Softmute */
{ 0x0000, 0x0000 }, /* R1557 */
{ 0x0000, 0x0000 }, /* R1558 */
{ 0x0000, 0x0000 }, /* R1559 */
{ 0x0000, 0x0000 }, /* R1560 */
{ 0x0000, 0x0000 }, /* R1561 */
{ 0x0000, 0x0000 }, /* R1562 */
{ 0x0000, 0x0000 }, /* R1563 */
{ 0x0000, 0x0000 }, /* R1564 */
{ 0x0000, 0x0000 }, /* R1565 */
{ 0x0000, 0x0000 }, /* R1566 */
{ 0x0000, 0x0000 }, /* R1567 */
{ 0x0003, 0x0003 }, /* R1568 - Oversampling */
{ 0x03C3, 0x03C3 }, /* R1569 - Sidetone */
};
const u16 wm8994_reg_defaults[WM8994_CACHE_SIZE] = {
0x8994, /* R0 - Software Reset */
0x0000, /* R1 - Power Management (1) */
0x6000, /* R2 - Power Management (2) */
0x0000, /* R3 - Power Management (3) */
0x0000, /* R4 - Power Management (4) */
0x0000, /* R5 - Power Management (5) */
0x0000, /* R6 - Power Management (6) */
0x0000, /* R7 */
0x0000, /* R8 */
0x0000, /* R9 */
0x0000, /* R10 */
0x0000, /* R11 */
0x0000, /* R12 */
0x0000, /* R13 */
0x0000, /* R14 */
0x0000, /* R15 */
0x0000, /* R16 */
0x0000, /* R17 */
0x0000, /* R18 */
0x0000, /* R19 */
0x0000, /* R20 */
0x0000, /* R21 - Input Mixer (1) */
0x0000, /* R22 */
0x0000, /* R23 */
0x008B, /* R24 - Left Line Input 1&2 Volume */
0x008B, /* R25 - Left Line Input 3&4 Volume */
0x008B, /* R26 - Right Line Input 1&2 Volume */
0x008B, /* R27 - Right Line Input 3&4 Volume */
0x006D, /* R28 - Left Output Volume */
0x006D, /* R29 - Right Output Volume */
0x0066, /* R30 - Line Outputs Volume */
0x0020, /* R31 - HPOUT2 Volume */
0x0079, /* R32 - Left OPGA Volume */
0x0079, /* R33 - Right OPGA Volume */
0x0003, /* R34 - SPKMIXL Attenuation */
0x0003, /* R35 - SPKMIXR Attenuation */
0x0011, /* R36 - SPKOUT Mixers */
0x0140, /* R37 - ClassD */
0x0079, /* R38 - Speaker Volume Left */
0x0079, /* R39 - Speaker Volume Right */
0x0000, /* R40 - Input Mixer (2) */
0x0000, /* R41 - Input Mixer (3) */
0x0000, /* R42 - Input Mixer (4) */
0x0000, /* R43 - Input Mixer (5) */
0x0000, /* R44 - Input Mixer (6) */
0x0000, /* R45 - Output Mixer (1) */
0x0000, /* R46 - Output Mixer (2) */
0x0000, /* R47 - Output Mixer (3) */
0x0000, /* R48 - Output Mixer (4) */
0x0000, /* R49 - Output Mixer (5) */
0x0000, /* R50 - Output Mixer (6) */
0x0000, /* R51 - HPOUT2 Mixer */
0x0000, /* R52 - Line Mixer (1) */
0x0000, /* R53 - Line Mixer (2) */
0x0000, /* R54 - Speaker Mixer */
0x0000, /* R55 - Additional Control */
0x0000, /* R56 - AntiPOP (1) */
0x0000, /* R57 - AntiPOP (2) */
0x0000, /* R58 - MICBIAS */
0x000D, /* R59 - LDO 1 */
0x0003, /* R60 - LDO 2 */
0x0000, /* R61 */
0x0000, /* R62 */
0x0000, /* R63 */
0x0000, /* R64 */
0x0000, /* R65 */
0x0000, /* R66 */
0x0000, /* R67 */
0x0000, /* R68 */
0x0000, /* R69 */
0x0000, /* R70 */
0x0000, /* R71 */
0x0000, /* R72 */
0x0000, /* R73 */
0x0000, /* R74 */
0x0000, /* R75 */
0x1F25, /* R76 - Charge Pump (1) */
0x0000, /* R77 */
0x0000, /* R78 */
0x0000, /* R79 */
0x0000, /* R80 */
0x0004, /* R81 - Class W (1) */
0x0000, /* R82 */
0x0000, /* R83 */
0x0000, /* R84 - DC Servo (1) */
0x054A, /* R85 - DC Servo (2) */
0x0000, /* R86 */
0x0000, /* R87 - DC Servo (4) */
0x0000, /* R88 - DC Servo Readback */
0x0000, /* R89 */
0x0000, /* R90 */
0x0000, /* R91 */
0x0000, /* R92 */
0x0000, /* R93 */
0x0000, /* R94 */
0x0000, /* R95 */
0x0000, /* R96 - Analogue HP (1) */
0x0000, /* R97 */
0x0000, /* R98 */
0x0000, /* R99 */
0x0000, /* R100 */
0x0000, /* R101 */
0x0000, /* R102 */
0x0000, /* R103 */
0x0000, /* R104 */
0x0000, /* R105 */
0x0000, /* R106 */
0x0000, /* R107 */
0x0000, /* R108 */
0x0000, /* R109 */
0x0000, /* R110 */
0x0000, /* R111 */
0x0000, /* R112 */
0x0000, /* R113 */
0x0000, /* R114 */
0x0000, /* R115 */
0x0000, /* R116 */
0x0000, /* R117 */
0x0000, /* R118 */
0x0000, /* R119 */
0x0000, /* R120 */
0x0000, /* R121 */
0x0000, /* R122 */
0x0000, /* R123 */
0x0000, /* R124 */
0x0000, /* R125 */
0x0000, /* R126 */
0x0000, /* R127 */
0x0000, /* R128 */
0x0000, /* R129 */
0x0000, /* R130 */
0x0000, /* R131 */
0x0000, /* R132 */
0x0000, /* R133 */
0x0000, /* R134 */
0x0000, /* R135 */
0x0000, /* R136 */
0x0000, /* R137 */
0x0000, /* R138 */
0x0000, /* R139 */
0x0000, /* R140 */
0x0000, /* R141 */
0x0000, /* R142 */
0x0000, /* R143 */
0x0000, /* R144 */
0x0000, /* R145 */
0x0000, /* R146 */
0x0000, /* R147 */
0x0000, /* R148 */
0x0000, /* R149 */
0x0000, /* R150 */
0x0000, /* R151 */
0x0000, /* R152 */
0x0000, /* R153 */
0x0000, /* R154 */
0x0000, /* R155 */
0x0000, /* R156 */
0x0000, /* R157 */
0x0000, /* R158 */
0x0000, /* R159 */
0x0000, /* R160 */
0x0000, /* R161 */
0x0000, /* R162 */
0x0000, /* R163 */
0x0000, /* R164 */
0x0000, /* R165 */
0x0000, /* R166 */
0x0000, /* R167 */
0x0000, /* R168 */
0x0000, /* R169 */
0x0000, /* R170 */
0x0000, /* R171 */
0x0000, /* R172 */
0x0000, /* R173 */
0x0000, /* R174 */
0x0000, /* R175 */
0x0000, /* R176 */
0x0000, /* R177 */
0x0000, /* R178 */
0x0000, /* R179 */
0x0000, /* R180 */
0x0000, /* R181 */
0x0000, /* R182 */
0x0000, /* R183 */
0x0000, /* R184 */
0x0000, /* R185 */
0x0000, /* R186 */
0x0000, /* R187 */
0x0000, /* R188 */
0x0000, /* R189 */
0x0000, /* R190 */
0x0000, /* R191 */
0x0000, /* R192 */
0x0000, /* R193 */
0x0000, /* R194 */
0x0000, /* R195 */
0x0000, /* R196 */
0x0000, /* R197 */
0x0000, /* R198 */
0x0000, /* R199 */
0x0000, /* R200 */
0x0000, /* R201 */
0x0000, /* R202 */
0x0000, /* R203 */
0x0000, /* R204 */
0x0000, /* R205 */
0x0000, /* R206 */
0x0000, /* R207 */
0x0000, /* R208 */
0x0000, /* R209 */
0x0000, /* R210 */
0x0000, /* R211 */
0x0000, /* R212 */
0x0000, /* R213 */
0x0000, /* R214 */
0x0000, /* R215 */
0x0000, /* R216 */
0x0000, /* R217 */
0x0000, /* R218 */
0x0000, /* R219 */
0x0000, /* R220 */
0x0000, /* R221 */
0x0000, /* R222 */
0x0000, /* R223 */
0x0000, /* R224 */
0x0000, /* R225 */
0x0000, /* R226 */
0x0000, /* R227 */
0x0000, /* R228 */
0x0000, /* R229 */
0x0000, /* R230 */
0x0000, /* R231 */
0x0000, /* R232 */
0x0000, /* R233 */
0x0000, /* R234 */
0x0000, /* R235 */
0x0000, /* R236 */
0x0000, /* R237 */
0x0000, /* R238 */
0x0000, /* R239 */
0x0000, /* R240 */
0x0000, /* R241 */
0x0000, /* R242 */
0x0000, /* R243 */
0x0000, /* R244 */
0x0000, /* R245 */
0x0000, /* R246 */
0x0000, /* R247 */
0x0000, /* R248 */
0x0000, /* R249 */
0x0000, /* R250 */
0x0000, /* R251 */
0x0000, /* R252 */
0x0000, /* R253 */
0x0000, /* R254 */
0x0000, /* R255 */
0x0003, /* R256 - Chip Revision */
0x8004, /* R257 - Control Interface */
0x0000, /* R258 */
0x0000, /* R259 */
0x0000, /* R260 */
0x0000, /* R261 */
0x0000, /* R262 */
0x0000, /* R263 */
0x0000, /* R264 */
0x0000, /* R265 */
0x0000, /* R266 */
0x0000, /* R267 */
0x0000, /* R268 */
0x0000, /* R269 */
0x0000, /* R270 */
0x0000, /* R271 */
0x0000, /* R272 - Write Sequencer Ctrl (1) */
0x0000, /* R273 - Write Sequencer Ctrl (2) */
0x0000, /* R274 */
0x0000, /* R275 */
0x0000, /* R276 */
0x0000, /* R277 */
0x0000, /* R278 */
0x0000, /* R279 */
0x0000, /* R280 */
0x0000, /* R281 */
0x0000, /* R282 */
0x0000, /* R283 */
0x0000, /* R284 */
0x0000, /* R285 */
0x0000, /* R286 */
0x0000, /* R287 */
0x0000, /* R288 */
0x0000, /* R289 */
0x0000, /* R290 */
0x0000, /* R291 */
0x0000, /* R292 */
0x0000, /* R293 */
0x0000, /* R294 */
0x0000, /* R295 */
0x0000, /* R296 */
0x0000, /* R297 */
0x0000, /* R298 */
0x0000, /* R299 */
0x0000, /* R300 */
0x0000, /* R301 */
0x0000, /* R302 */
0x0000, /* R303 */
0x0000, /* R304 */
0x0000, /* R305 */
0x0000, /* R306 */
0x0000, /* R307 */
0x0000, /* R308 */
0x0000, /* R309 */
0x0000, /* R310 */
0x0000, /* R311 */
0x0000, /* R312 */
0x0000, /* R313 */
0x0000, /* R314 */
0x0000, /* R315 */
0x0000, /* R316 */
0x0000, /* R317 */
0x0000, /* R318 */
0x0000, /* R319 */
0x0000, /* R320 */
0x0000, /* R321 */
0x0000, /* R322 */
0x0000, /* R323 */
0x0000, /* R324 */
0x0000, /* R325 */
0x0000, /* R326 */
0x0000, /* R327 */
0x0000, /* R328 */
0x0000, /* R329 */
0x0000, /* R330 */
0x0000, /* R331 */
0x0000, /* R332 */
0x0000, /* R333 */
0x0000, /* R334 */
0x0000, /* R335 */
0x0000, /* R336 */
0x0000, /* R337 */
0x0000, /* R338 */
0x0000, /* R339 */
0x0000, /* R340 */
0x0000, /* R341 */
0x0000, /* R342 */
0x0000, /* R343 */
0x0000, /* R344 */
0x0000, /* R345 */
0x0000, /* R346 */
0x0000, /* R347 */
0x0000, /* R348 */
0x0000, /* R349 */
0x0000, /* R350 */
0x0000, /* R351 */
0x0000, /* R352 */
0x0000, /* R353 */
0x0000, /* R354 */
0x0000, /* R355 */
0x0000, /* R356 */
0x0000, /* R357 */
0x0000, /* R358 */
0x0000, /* R359 */
0x0000, /* R360 */
0x0000, /* R361 */
0x0000, /* R362 */
0x0000, /* R363 */
0x0000, /* R364 */
0x0000, /* R365 */
0x0000, /* R366 */
0x0000, /* R367 */
0x0000, /* R368 */
0x0000, /* R369 */
0x0000, /* R370 */
0x0000, /* R371 */
0x0000, /* R372 */
0x0000, /* R373 */
0x0000, /* R374 */
0x0000, /* R375 */
0x0000, /* R376 */
0x0000, /* R377 */
0x0000, /* R378 */
0x0000, /* R379 */
0x0000, /* R380 */
0x0000, /* R381 */
0x0000, /* R382 */
0x0000, /* R383 */
0x0000, /* R384 */
0x0000, /* R385 */
0x0000, /* R386 */
0x0000, /* R387 */
0x0000, /* R388 */
0x0000, /* R389 */
0x0000, /* R390 */
0x0000, /* R391 */
0x0000, /* R392 */
0x0000, /* R393 */
0x0000, /* R394 */
0x0000, /* R395 */
0x0000, /* R396 */
0x0000, /* R397 */
0x0000, /* R398 */
0x0000, /* R399 */
0x0000, /* R400 */
0x0000, /* R401 */
0x0000, /* R402 */
0x0000, /* R403 */
0x0000, /* R404 */
0x0000, /* R405 */
0x0000, /* R406 */
0x0000, /* R407 */
0x0000, /* R408 */
0x0000, /* R409 */
0x0000, /* R410 */
0x0000, /* R411 */
0x0000, /* R412 */
0x0000, /* R413 */
0x0000, /* R414 */
0x0000, /* R415 */
0x0000, /* R416 */
0x0000, /* R417 */
0x0000, /* R418 */
0x0000, /* R419 */
0x0000, /* R420 */
0x0000, /* R421 */
0x0000, /* R422 */
0x0000, /* R423 */
0x0000, /* R424 */
0x0000, /* R425 */
0x0000, /* R426 */
0x0000, /* R427 */
0x0000, /* R428 */
0x0000, /* R429 */
0x0000, /* R430 */
0x0000, /* R431 */
0x0000, /* R432 */
0x0000, /* R433 */
0x0000, /* R434 */
0x0000, /* R435 */
0x0000, /* R436 */
0x0000, /* R437 */
0x0000, /* R438 */
0x0000, /* R439 */
0x0000, /* R440 */
0x0000, /* R441 */
0x0000, /* R442 */
0x0000, /* R443 */
0x0000, /* R444 */
0x0000, /* R445 */
0x0000, /* R446 */
0x0000, /* R447 */
0x0000, /* R448 */
0x0000, /* R449 */
0x0000, /* R450 */
0x0000, /* R451 */
0x0000, /* R452 */
0x0000, /* R453 */
0x0000, /* R454 */
0x0000, /* R455 */
0x0000, /* R456 */
0x0000, /* R457 */
0x0000, /* R458 */
0x0000, /* R459 */
0x0000, /* R460 */
0x0000, /* R461 */
0x0000, /* R462 */
0x0000, /* R463 */
0x0000, /* R464 */
0x0000, /* R465 */
0x0000, /* R466 */
0x0000, /* R467 */
0x0000, /* R468 */
0x0000, /* R469 */
0x0000, /* R470 */
0x0000, /* R471 */
0x0000, /* R472 */
0x0000, /* R473 */
0x0000, /* R474 */
0x0000, /* R475 */
0x0000, /* R476 */
0x0000, /* R477 */
0x0000, /* R478 */
0x0000, /* R479 */
0x0000, /* R480 */
0x0000, /* R481 */
0x0000, /* R482 */
0x0000, /* R483 */
0x0000, /* R484 */
0x0000, /* R485 */
0x0000, /* R486 */
0x0000, /* R487 */
0x0000, /* R488 */
0x0000, /* R489 */
0x0000, /* R490 */
0x0000, /* R491 */
0x0000, /* R492 */
0x0000, /* R493 */
0x0000, /* R494 */
0x0000, /* R495 */
0x0000, /* R496 */
0x0000, /* R497 */
0x0000, /* R498 */
0x0000, /* R499 */
0x0000, /* R500 */
0x0000, /* R501 */
0x0000, /* R502 */
0x0000, /* R503 */
0x0000, /* R504 */
0x0000, /* R505 */
0x0000, /* R506 */
0x0000, /* R507 */
0x0000, /* R508 */
0x0000, /* R509 */
0x0000, /* R510 */
0x0000, /* R511 */
0x0000, /* R512 - AIF1 Clocking (1) */
0x0000, /* R513 - AIF1 Clocking (2) */
0x0000, /* R514 */
0x0000, /* R515 */
0x0000, /* R516 - AIF2 Clocking (1) */
0x0000, /* R517 - AIF2 Clocking (2) */
0x0000, /* R518 */
0x0000, /* R519 */
0x0000, /* R520 - Clocking (1) */
0x0000, /* R521 - Clocking (2) */
0x0000, /* R522 */
0x0000, /* R523 */
0x0000, /* R524 */
0x0000, /* R525 */
0x0000, /* R526 */
0x0000, /* R527 */
0x0083, /* R528 - AIF1 Rate */
0x0083, /* R529 - AIF2 Rate */
0x0000, /* R530 - Rate Status */
0x0000, /* R531 */
0x0000, /* R532 */
0x0000, /* R533 */
0x0000, /* R534 */
0x0000, /* R535 */
0x0000, /* R536 */
0x0000, /* R537 */
0x0000, /* R538 */
0x0000, /* R539 */
0x0000, /* R540 */
0x0000, /* R541 */
0x0000, /* R542 */
0x0000, /* R543 */
0x0000, /* R544 - FLL1 Control (1) */
0x0000, /* R545 - FLL1 Control (2) */
0x0000, /* R546 - FLL1 Control (3) */
0x0000, /* R547 - FLL1 Control (4) */
0x0C80, /* R548 - FLL1 Control (5) */
0x0000, /* R549 */
0x0000, /* R550 */
0x0000, /* R551 */
0x0000, /* R552 */
0x0000, /* R553 */
0x0000, /* R554 */
0x0000, /* R555 */
0x0000, /* R556 */
0x0000, /* R557 */
0x0000, /* R558 */
0x0000, /* R559 */
0x0000, /* R560 */
0x0000, /* R561 */
0x0000, /* R562 */
0x0000, /* R563 */
0x0000, /* R564 */
0x0000, /* R565 */
0x0000, /* R566 */
0x0000, /* R567 */
0x0000, /* R568 */
0x0000, /* R569 */
0x0000, /* R570 */
0x0000, /* R571 */
0x0000, /* R572 */
0x0000, /* R573 */
0x0000, /* R574 */
0x0000, /* R575 */
0x0000, /* R576 - FLL2 Control (1) */
0x0000, /* R577 - FLL2 Control (2) */
0x0000, /* R578 - FLL2 Control (3) */
0x0000, /* R579 - FLL2 Control (4) */
0x0C80, /* R580 - FLL2 Control (5) */
0x0000, /* R581 */
0x0000, /* R582 */
0x0000, /* R583 */
0x0000, /* R584 */
0x0000, /* R585 */
0x0000, /* R586 */
0x0000, /* R587 */
0x0000, /* R588 */
0x0000, /* R589 */
0x0000, /* R590 */
0x0000, /* R591 */
0x0000, /* R592 */
0x0000, /* R593 */
0x0000, /* R594 */
0x0000, /* R595 */
0x0000, /* R596 */
0x0000, /* R597 */
0x0000, /* R598 */
0x0000, /* R599 */
0x0000, /* R600 */
0x0000, /* R601 */
0x0000, /* R602 */
0x0000, /* R603 */
0x0000, /* R604 */
0x0000, /* R605 */
0x0000, /* R606 */
0x0000, /* R607 */
0x0000, /* R608 */
0x0000, /* R609 */
0x0000, /* R610 */
0x0000, /* R611 */
0x0000, /* R612 */
0x0000, /* R613 */
0x0000, /* R614 */
0x0000, /* R615 */
0x0000, /* R616 */
0x0000, /* R617 */
0x0000, /* R618 */
0x0000, /* R619 */
0x0000, /* R620 */
0x0000, /* R621 */
0x0000, /* R622 */
0x0000, /* R623 */
0x0000, /* R624 */
0x0000, /* R625 */
0x0000, /* R626 */
0x0000, /* R627 */
0x0000, /* R628 */
0x0000, /* R629 */
0x0000, /* R630 */
0x0000, /* R631 */
0x0000, /* R632 */
0x0000, /* R633 */
0x0000, /* R634 */
0x0000, /* R635 */
0x0000, /* R636 */
0x0000, /* R637 */
0x0000, /* R638 */
0x0000, /* R639 */
0x0000, /* R640 */
0x0000, /* R641 */
0x0000, /* R642 */
0x0000, /* R643 */
0x0000, /* R644 */
0x0000, /* R645 */
0x0000, /* R646 */
0x0000, /* R647 */
0x0000, /* R648 */
0x0000, /* R649 */
0x0000, /* R650 */
0x0000, /* R651 */
0x0000, /* R652 */
0x0000, /* R653 */
0x0000, /* R654 */
0x0000, /* R655 */
0x0000, /* R656 */
0x0000, /* R657 */
0x0000, /* R658 */
0x0000, /* R659 */
0x0000, /* R660 */
0x0000, /* R661 */
0x0000, /* R662 */
0x0000, /* R663 */
0x0000, /* R664 */
0x0000, /* R665 */
0x0000, /* R666 */
0x0000, /* R667 */
0x0000, /* R668 */
0x0000, /* R669 */
0x0000, /* R670 */
0x0000, /* R671 */
0x0000, /* R672 */
0x0000, /* R673 */
0x0000, /* R674 */
0x0000, /* R675 */
0x0000, /* R676 */
0x0000, /* R677 */
0x0000, /* R678 */
0x0000, /* R679 */
0x0000, /* R680 */
0x0000, /* R681 */
0x0000, /* R682 */
0x0000, /* R683 */
0x0000, /* R684 */
0x0000, /* R685 */
0x0000, /* R686 */
0x0000, /* R687 */
0x0000, /* R688 */
0x0000, /* R689 */
0x0000, /* R690 */
0x0000, /* R691 */
0x0000, /* R692 */
0x0000, /* R693 */
0x0000, /* R694 */
0x0000, /* R695 */
0x0000, /* R696 */
0x0000, /* R697 */
0x0000, /* R698 */
0x0000, /* R699 */
0x0000, /* R700 */
0x0000, /* R701 */
0x0000, /* R702 */
0x0000, /* R703 */
0x0000, /* R704 */
0x0000, /* R705 */
0x0000, /* R706 */
0x0000, /* R707 */
0x0000, /* R708 */
0x0000, /* R709 */
0x0000, /* R710 */
0x0000, /* R711 */
0x0000, /* R712 */
0x0000, /* R713 */
0x0000, /* R714 */
0x0000, /* R715 */
0x0000, /* R716 */
0x0000, /* R717 */
0x0000, /* R718 */
0x0000, /* R719 */
0x0000, /* R720 */
0x0000, /* R721 */
0x0000, /* R722 */
0x0000, /* R723 */
0x0000, /* R724 */
0x0000, /* R725 */
0x0000, /* R726 */
0x0000, /* R727 */
0x0000, /* R728 */
0x0000, /* R729 */
0x0000, /* R730 */
0x0000, /* R731 */
0x0000, /* R732 */
0x0000, /* R733 */
0x0000, /* R734 */
0x0000, /* R735 */
0x0000, /* R736 */
0x0000, /* R737 */
0x0000, /* R738 */
0x0000, /* R739 */
0x0000, /* R740 */
0x0000, /* R741 */
0x0000, /* R742 */
0x0000, /* R743 */
0x0000, /* R744 */
0x0000, /* R745 */
0x0000, /* R746 */
0x0000, /* R747 */
0x0000, /* R748 */
0x0000, /* R749 */
0x0000, /* R750 */
0x0000, /* R751 */
0x0000, /* R752 */
0x0000, /* R753 */
0x0000, /* R754 */
0x0000, /* R755 */
0x0000, /* R756 */
0x0000, /* R757 */
0x0000, /* R758 */
0x0000, /* R759 */
0x0000, /* R760 */
0x0000, /* R761 */
0x0000, /* R762 */
0x0000, /* R763 */
0x0000, /* R764 */
0x0000, /* R765 */
0x0000, /* R766 */
0x0000, /* R767 */
0x4050, /* R768 - AIF1 Control (1) */
0x4000, /* R769 - AIF1 Control (2) */
0x0000, /* R770 - AIF1 Master/Slave */
0x0040, /* R771 - AIF1 BCLK */
0x0040, /* R772 - AIF1ADC LRCLK */
0x0040, /* R773 - AIF1DAC LRCLK */
0x0004, /* R774 - AIF1DAC Data */
0x0100, /* R775 - AIF1ADC Data */
0x0000, /* R776 */
0x0000, /* R777 */
0x0000, /* R778 */
0x0000, /* R779 */
0x0000, /* R780 */
0x0000, /* R781 */
0x0000, /* R782 */
0x0000, /* R783 */
0x4050, /* R784 - AIF2 Control (1) */
0x4000, /* R785 - AIF2 Control (2) */
0x0000, /* R786 - AIF2 Master/Slave */
0x0040, /* R787 - AIF2 BCLK */
0x0040, /* R788 - AIF2ADC LRCLK */
0x0040, /* R789 - AIF2DAC LRCLK */
0x0000, /* R790 - AIF2DAC Data */
0x0000, /* R791 - AIF2ADC Data */
0x0000, /* R792 */
0x0000, /* R793 */
0x0000, /* R794 */
0x0000, /* R795 */
0x0000, /* R796 */
0x0000, /* R797 */
0x0000, /* R798 */
0x0000, /* R799 */
0x0000, /* R800 */
0x0000, /* R801 */
0x0000, /* R802 */
0x0000, /* R803 */
0x0000, /* R804 */
0x0000, /* R805 */
0x0000, /* R806 */
0x0000, /* R807 */
0x0000, /* R808 */
0x0000, /* R809 */
0x0000, /* R810 */
0x0000, /* R811 */
0x0000, /* R812 */
0x0000, /* R813 */
0x0000, /* R814 */
0x0000, /* R815 */
0x0000, /* R816 */
0x0000, /* R817 */
0x0000, /* R818 */
0x0000, /* R819 */
0x0000, /* R820 */
0x0000, /* R821 */
0x0000, /* R822 */
0x0000, /* R823 */
0x0000, /* R824 */
0x0000, /* R825 */
0x0000, /* R826 */
0x0000, /* R827 */
0x0000, /* R828 */
0x0000, /* R829 */
0x0000, /* R830 */
0x0000, /* R831 */
0x0000, /* R832 */
0x0000, /* R833 */
0x0000, /* R834 */
0x0000, /* R835 */
0x0000, /* R836 */
0x0000, /* R837 */
0x0000, /* R838 */
0x0000, /* R839 */
0x0000, /* R840 */
0x0000, /* R841 */
0x0000, /* R842 */
0x0000, /* R843 */
0x0000, /* R844 */
0x0000, /* R845 */
0x0000, /* R846 */
0x0000, /* R847 */
0x0000, /* R848 */
0x0000, /* R849 */
0x0000, /* R850 */
0x0000, /* R851 */
0x0000, /* R852 */
0x0000, /* R853 */
0x0000, /* R854 */
0x0000, /* R855 */
0x0000, /* R856 */
0x0000, /* R857 */
0x0000, /* R858 */
0x0000, /* R859 */
0x0000, /* R860 */
0x0000, /* R861 */
0x0000, /* R862 */
0x0000, /* R863 */
0x0000, /* R864 */
0x0000, /* R865 */
0x0000, /* R866 */
0x0000, /* R867 */
0x0000, /* R868 */
0x0000, /* R869 */
0x0000, /* R870 */
0x0000, /* R871 */
0x0000, /* R872 */
0x0000, /* R873 */
0x0000, /* R874 */
0x0000, /* R875 */
0x0000, /* R876 */
0x0000, /* R877 */
0x0000, /* R878 */
0x0000, /* R879 */
0x0000, /* R880 */
0x0000, /* R881 */
0x0000, /* R882 */
0x0000, /* R883 */
0x0000, /* R884 */
0x0000, /* R885 */
0x0000, /* R886 */
0x0000, /* R887 */
0x0000, /* R888 */
0x0000, /* R889 */
0x0000, /* R890 */
0x0000, /* R891 */
0x0000, /* R892 */
0x0000, /* R893 */
0x0000, /* R894 */
0x0000, /* R895 */
0x0000, /* R896 */
0x0000, /* R897 */
0x0000, /* R898 */
0x0000, /* R899 */
0x0000, /* R900 */
0x0000, /* R901 */
0x0000, /* R902 */
0x0000, /* R903 */
0x0000, /* R904 */
0x0000, /* R905 */
0x0000, /* R906 */
0x0000, /* R907 */
0x0000, /* R908 */
0x0000, /* R909 */
0x0000, /* R910 */
0x0000, /* R911 */
0x0000, /* R912 */
0x0000, /* R913 */
0x0000, /* R914 */
0x0000, /* R915 */
0x0000, /* R916 */
0x0000, /* R917 */
0x0000, /* R918 */
0x0000, /* R919 */
0x0000, /* R920 */
0x0000, /* R921 */
0x0000, /* R922 */
0x0000, /* R923 */
0x0000, /* R924 */
0x0000, /* R925 */
0x0000, /* R926 */
0x0000, /* R927 */
0x0000, /* R928 */
0x0000, /* R929 */
0x0000, /* R930 */
0x0000, /* R931 */
0x0000, /* R932 */
0x0000, /* R933 */
0x0000, /* R934 */
0x0000, /* R935 */
0x0000, /* R936 */
0x0000, /* R937 */
0x0000, /* R938 */
0x0000, /* R939 */
0x0000, /* R940 */
0x0000, /* R941 */
0x0000, /* R942 */
0x0000, /* R943 */
0x0000, /* R944 */
0x0000, /* R945 */
0x0000, /* R946 */
0x0000, /* R947 */
0x0000, /* R948 */
0x0000, /* R949 */
0x0000, /* R950 */
0x0000, /* R951 */
0x0000, /* R952 */
0x0000, /* R953 */
0x0000, /* R954 */
0x0000, /* R955 */
0x0000, /* R956 */
0x0000, /* R957 */
0x0000, /* R958 */
0x0000, /* R959 */
0x0000, /* R960 */
0x0000, /* R961 */
0x0000, /* R962 */
0x0000, /* R963 */
0x0000, /* R964 */
0x0000, /* R965 */
0x0000, /* R966 */
0x0000, /* R967 */
0x0000, /* R968 */
0x0000, /* R969 */
0x0000, /* R970 */
0x0000, /* R971 */
0x0000, /* R972 */
0x0000, /* R973 */
0x0000, /* R974 */
0x0000, /* R975 */
0x0000, /* R976 */
0x0000, /* R977 */
0x0000, /* R978 */
0x0000, /* R979 */
0x0000, /* R980 */
0x0000, /* R981 */
0x0000, /* R982 */
0x0000, /* R983 */
0x0000, /* R984 */
0x0000, /* R985 */
0x0000, /* R986 */
0x0000, /* R987 */
0x0000, /* R988 */
0x0000, /* R989 */
0x0000, /* R990 */
0x0000, /* R991 */
0x0000, /* R992 */
0x0000, /* R993 */
0x0000, /* R994 */
0x0000, /* R995 */
0x0000, /* R996 */
0x0000, /* R997 */
0x0000, /* R998 */
0x0000, /* R999 */
0x0000, /* R1000 */
0x0000, /* R1001 */
0x0000, /* R1002 */
0x0000, /* R1003 */
0x0000, /* R1004 */
0x0000, /* R1005 */
0x0000, /* R1006 */
0x0000, /* R1007 */
0x0000, /* R1008 */
0x0000, /* R1009 */
0x0000, /* R1010 */
0x0000, /* R1011 */
0x0000, /* R1012 */
0x0000, /* R1013 */
0x0000, /* R1014 */
0x0000, /* R1015 */
0x0000, /* R1016 */
0x0000, /* R1017 */
0x0000, /* R1018 */
0x0000, /* R1019 */
0x0000, /* R1020 */
0x0000, /* R1021 */
0x0000, /* R1022 */
0x0000, /* R1023 */
0x00C0, /* R1024 - AIF1 ADC1 Left Volume */
0x00C0, /* R1025 - AIF1 ADC1 Right Volume */
0x00C0, /* R1026 - AIF1 DAC1 Left Volume */
0x00C0, /* R1027 - AIF1 DAC1 Right Volume */
0x00C0, /* R1028 - AIF1 ADC2 Left Volume */
0x00C0, /* R1029 - AIF1 ADC2 Right Volume */
0x00C0, /* R1030 - AIF1 DAC2 Left Volume */
0x00C0, /* R1031 - AIF1 DAC2 Right Volume */
0x0000, /* R1032 */
0x0000, /* R1033 */
0x0000, /* R1034 */
0x0000, /* R1035 */
0x0000, /* R1036 */
0x0000, /* R1037 */
0x0000, /* R1038 */
0x0000, /* R1039 */
0x0000, /* R1040 - AIF1 ADC1 Filters */
0x0000, /* R1041 - AIF1 ADC2 Filters */
0x0000, /* R1042 */
0x0000, /* R1043 */
0x0000, /* R1044 */
0x0000, /* R1045 */
0x0000, /* R1046 */
0x0000, /* R1047 */
0x0000, /* R1048 */
0x0000, /* R1049 */
0x0000, /* R1050 */
0x0000, /* R1051 */
0x0000, /* R1052 */
0x0000, /* R1053 */
0x0000, /* R1054 */
0x0000, /* R1055 */
0x0200, /* R1056 - AIF1 DAC1 Filters (1) */
0x0010, /* R1057 - AIF1 DAC1 Filters (2) */
0x0200, /* R1058 - AIF1 DAC2 Filters (1) */
0x0010, /* R1059 - AIF1 DAC2 Filters (2) */
0x0000, /* R1060 */
0x0000, /* R1061 */
0x0000, /* R1062 */
0x0000, /* R1063 */
0x0000, /* R1064 */
0x0000, /* R1065 */
0x0000, /* R1066 */
0x0000, /* R1067 */
0x0000, /* R1068 */
0x0000, /* R1069 */
0x0000, /* R1070 */
0x0000, /* R1071 */
0x0000, /* R1072 */
0x0000, /* R1073 */
0x0000, /* R1074 */
0x0000, /* R1075 */
0x0000, /* R1076 */
0x0000, /* R1077 */
0x0000, /* R1078 */
0x0000, /* R1079 */
0x0000, /* R1080 */
0x0000, /* R1081 */
0x0000, /* R1082 */
0x0000, /* R1083 */
0x0000, /* R1084 */
0x0000, /* R1085 */
0x0000, /* R1086 */
0x0000, /* R1087 */
0x0098, /* R1088 - AIF1 DRC1 (1) */
0x0845, /* R1089 - AIF1 DRC1 (2) */
0x0000, /* R1090 - AIF1 DRC1 (3) */
0x0000, /* R1091 - AIF1 DRC1 (4) */
0x0000, /* R1092 - AIF1 DRC1 (5) */
0x0000, /* R1093 */
0x0000, /* R1094 */
0x0000, /* R1095 */
0x0000, /* R1096 */
0x0000, /* R1097 */
0x0000, /* R1098 */
0x0000, /* R1099 */
0x0000, /* R1100 */
0x0000, /* R1101 */
0x0000, /* R1102 */
0x0000, /* R1103 */
0x0098, /* R1104 - AIF1 DRC2 (1) */
0x0845, /* R1105 - AIF1 DRC2 (2) */
0x0000, /* R1106 - AIF1 DRC2 (3) */
0x0000, /* R1107 - AIF1 DRC2 (4) */
0x0000, /* R1108 - AIF1 DRC2 (5) */
0x0000, /* R1109 */
0x0000, /* R1110 */
0x0000, /* R1111 */
0x0000, /* R1112 */
0x0000, /* R1113 */
0x0000, /* R1114 */
0x0000, /* R1115 */
0x0000, /* R1116 */
0x0000, /* R1117 */
0x0000, /* R1118 */
0x0000, /* R1119 */
0x0000, /* R1120 */
0x0000, /* R1121 */
0x0000, /* R1122 */
0x0000, /* R1123 */
0x0000, /* R1124 */
0x0000, /* R1125 */
0x0000, /* R1126 */
0x0000, /* R1127 */
0x0000, /* R1128 */
0x0000, /* R1129 */
0x0000, /* R1130 */
0x0000, /* R1131 */
0x0000, /* R1132 */
0x0000, /* R1133 */
0x0000, /* R1134 */
0x0000, /* R1135 */
0x0000, /* R1136 */
0x0000, /* R1137 */
0x0000, /* R1138 */
0x0000, /* R1139 */
0x0000, /* R1140 */
0x0000, /* R1141 */
0x0000, /* R1142 */
0x0000, /* R1143 */
0x0000, /* R1144 */
0x0000, /* R1145 */
0x0000, /* R1146 */
0x0000, /* R1147 */
0x0000, /* R1148 */
0x0000, /* R1149 */
0x0000, /* R1150 */
0x0000, /* R1151 */
0x6318, /* R1152 - AIF1 DAC1 EQ Gains (1) */
0x6300, /* R1153 - AIF1 DAC1 EQ Gains (2) */
0x0FCA, /* R1154 - AIF1 DAC1 EQ Band 1 A */
0x0400, /* R1155 - AIF1 DAC1 EQ Band 1 B */
0x00D8, /* R1156 - AIF1 DAC1 EQ Band 1 PG */
0x1EB5, /* R1157 - AIF1 DAC1 EQ Band 2 A */
0xF145, /* R1158 - AIF1 DAC1 EQ Band 2 B */
0x0B75, /* R1159 - AIF1 DAC1 EQ Band 2 C */
0x01C5, /* R1160 - AIF1 DAC1 EQ Band 2 PG */
0x1C58, /* R1161 - AIF1 DAC1 EQ Band 3 A */
0xF373, /* R1162 - AIF1 DAC1 EQ Band 3 B */
0x0A54, /* R1163 - AIF1 DAC1 EQ Band 3 C */
0x0558, /* R1164 - AIF1 DAC1 EQ Band 3 PG */
0x168E, /* R1165 - AIF1 DAC1 EQ Band 4 A */
0xF829, /* R1166 - AIF1 DAC1 EQ Band 4 B */
0x07AD, /* R1167 - AIF1 DAC1 EQ Band 4 C */
0x1103, /* R1168 - AIF1 DAC1 EQ Band 4 PG */
0x0564, /* R1169 - AIF1 DAC1 EQ Band 5 A */
0x0559, /* R1170 - AIF1 DAC1 EQ Band 5 B */
0x4000, /* R1171 - AIF1 DAC1 EQ Band 5 PG */
0x0000, /* R1172 */
0x0000, /* R1173 */
0x0000, /* R1174 */
0x0000, /* R1175 */
0x0000, /* R1176 */
0x0000, /* R1177 */
0x0000, /* R1178 */
0x0000, /* R1179 */
0x0000, /* R1180 */
0x0000, /* R1181 */
0x0000, /* R1182 */
0x0000, /* R1183 */
0x6318, /* R1184 - AIF1 DAC2 EQ Gains (1) */
0x6300, /* R1185 - AIF1 DAC2 EQ Gains (2) */
0x0FCA, /* R1186 - AIF1 DAC2 EQ Band 1 A */
0x0400, /* R1187 - AIF1 DAC2 EQ Band 1 B */
0x00D8, /* R1188 - AIF1 DAC2 EQ Band 1 PG */
0x1EB5, /* R1189 - AIF1 DAC2 EQ Band 2 A */
0xF145, /* R1190 - AIF1 DAC2 EQ Band 2 B */
0x0B75, /* R1191 - AIF1 DAC2 EQ Band 2 C */
0x01C5, /* R1192 - AIF1 DAC2 EQ Band 2 PG */
0x1C58, /* R1193 - AIF1 DAC2 EQ Band 3 A */
0xF373, /* R1194 - AIF1 DAC2 EQ Band 3 B */
0x0A54, /* R1195 - AIF1 DAC2 EQ Band 3 C */
0x0558, /* R1196 - AIF1 DAC2 EQ Band 3 PG */
0x168E, /* R1197 - AIF1 DAC2 EQ Band 4 A */
0xF829, /* R1198 - AIF1 DAC2 EQ Band 4 B */
0x07AD, /* R1199 - AIF1 DAC2 EQ Band 4 C */
0x1103, /* R1200 - AIF1 DAC2 EQ Band 4 PG */
0x0564, /* R1201 - AIF1 DAC2 EQ Band 5 A */
0x0559, /* R1202 - AIF1 DAC2 EQ Band 5 B */
0x4000, /* R1203 - AIF1 DAC2 EQ Band 5 PG */
0x0000, /* R1204 */
0x0000, /* R1205 */
0x0000, /* R1206 */
0x0000, /* R1207 */
0x0000, /* R1208 */
0x0000, /* R1209 */
0x0000, /* R1210 */
0x0000, /* R1211 */
0x0000, /* R1212 */
0x0000, /* R1213 */
0x0000, /* R1214 */
0x0000, /* R1215 */
0x0000, /* R1216 */
0x0000, /* R1217 */
0x0000, /* R1218 */
0x0000, /* R1219 */
0x0000, /* R1220 */
0x0000, /* R1221 */
0x0000, /* R1222 */
0x0000, /* R1223 */
0x0000, /* R1224 */
0x0000, /* R1225 */
0x0000, /* R1226 */
0x0000, /* R1227 */
0x0000, /* R1228 */
0x0000, /* R1229 */
0x0000, /* R1230 */
0x0000, /* R1231 */
0x0000, /* R1232 */
0x0000, /* R1233 */
0x0000, /* R1234 */
0x0000, /* R1235 */
0x0000, /* R1236 */
0x0000, /* R1237 */
0x0000, /* R1238 */
0x0000, /* R1239 */
0x0000, /* R1240 */
0x0000, /* R1241 */
0x0000, /* R1242 */
0x0000, /* R1243 */
0x0000, /* R1244 */
0x0000, /* R1245 */
0x0000, /* R1246 */
0x0000, /* R1247 */
0x0000, /* R1248 */
0x0000, /* R1249 */
0x0000, /* R1250 */
0x0000, /* R1251 */
0x0000, /* R1252 */
0x0000, /* R1253 */
0x0000, /* R1254 */
0x0000, /* R1255 */
0x0000, /* R1256 */
0x0000, /* R1257 */
0x0000, /* R1258 */
0x0000, /* R1259 */
0x0000, /* R1260 */
0x0000, /* R1261 */
0x0000, /* R1262 */
0x0000, /* R1263 */
0x0000, /* R1264 */
0x0000, /* R1265 */
0x0000, /* R1266 */
0x0000, /* R1267 */
0x0000, /* R1268 */
0x0000, /* R1269 */
0x0000, /* R1270 */
0x0000, /* R1271 */
0x0000, /* R1272 */
0x0000, /* R1273 */
0x0000, /* R1274 */
0x0000, /* R1275 */
0x0000, /* R1276 */
0x0000, /* R1277 */
0x0000, /* R1278 */
0x0000, /* R1279 */
0x00C0, /* R1280 - AIF2 ADC Left Volume */
0x00C0, /* R1281 - AIF2 ADC Right Volume */
0x00C0, /* R1282 - AIF2 DAC Left Volume */
0x00C0, /* R1283 - AIF2 DAC Right Volume */
0x0000, /* R1284 */
0x0000, /* R1285 */
0x0000, /* R1286 */
0x0000, /* R1287 */
0x0000, /* R1288 */
0x0000, /* R1289 */
0x0000, /* R1290 */
0x0000, /* R1291 */
0x0000, /* R1292 */
0x0000, /* R1293 */
0x0000, /* R1294 */
0x0000, /* R1295 */
0x0000, /* R1296 - AIF2 ADC Filters */
0x0000, /* R1297 */
0x0000, /* R1298 */
0x0000, /* R1299 */
0x0000, /* R1300 */
0x0000, /* R1301 */
0x0000, /* R1302 */
0x0000, /* R1303 */
0x0000, /* R1304 */
0x0000, /* R1305 */
0x0000, /* R1306 */
0x0000, /* R1307 */
0x0000, /* R1308 */
0x0000, /* R1309 */
0x0000, /* R1310 */
0x0000, /* R1311 */
0x0200, /* R1312 - AIF2 DAC Filters (1) */
0x0010, /* R1313 - AIF2 DAC Filters (2) */
0x0000, /* R1314 */
0x0000, /* R1315 */
0x0000, /* R1316 */
0x0000, /* R1317 */
0x0000, /* R1318 */
0x0000, /* R1319 */
0x0000, /* R1320 */
0x0000, /* R1321 */
0x0000, /* R1322 */
0x0000, /* R1323 */
0x0000, /* R1324 */
0x0000, /* R1325 */
0x0000, /* R1326 */
0x0000, /* R1327 */
0x0000, /* R1328 */
0x0000, /* R1329 */
0x0000, /* R1330 */
0x0000, /* R1331 */
0x0000, /* R1332 */
0x0000, /* R1333 */
0x0000, /* R1334 */
0x0000, /* R1335 */
0x0000, /* R1336 */
0x0000, /* R1337 */
0x0000, /* R1338 */
0x0000, /* R1339 */
0x0000, /* R1340 */
0x0000, /* R1341 */
0x0000, /* R1342 */
0x0000, /* R1343 */
0x0098, /* R1344 - AIF2 DRC (1) */
0x0845, /* R1345 - AIF2 DRC (2) */
0x0000, /* R1346 - AIF2 DRC (3) */
0x0000, /* R1347 - AIF2 DRC (4) */
0x0000, /* R1348 - AIF2 DRC (5) */
0x0000, /* R1349 */
0x0000, /* R1350 */
0x0000, /* R1351 */
0x0000, /* R1352 */
0x0000, /* R1353 */
0x0000, /* R1354 */
0x0000, /* R1355 */
0x0000, /* R1356 */
0x0000, /* R1357 */
0x0000, /* R1358 */
0x0000, /* R1359 */
0x0000, /* R1360 */
0x0000, /* R1361 */
0x0000, /* R1362 */
0x0000, /* R1363 */
0x0000, /* R1364 */
0x0000, /* R1365 */
0x0000, /* R1366 */
0x0000, /* R1367 */
0x0000, /* R1368 */
0x0000, /* R1369 */
0x0000, /* R1370 */
0x0000, /* R1371 */
0x0000, /* R1372 */
0x0000, /* R1373 */
0x0000, /* R1374 */
0x0000, /* R1375 */
0x0000, /* R1376 */
0x0000, /* R1377 */
0x0000, /* R1378 */
0x0000, /* R1379 */
0x0000, /* R1380 */
0x0000, /* R1381 */
0x0000, /* R1382 */
0x0000, /* R1383 */
0x0000, /* R1384 */
0x0000, /* R1385 */
0x0000, /* R1386 */
0x0000, /* R1387 */
0x0000, /* R1388 */
0x0000, /* R1389 */
0x0000, /* R1390 */
0x0000, /* R1391 */
0x0000, /* R1392 */
0x0000, /* R1393 */
0x0000, /* R1394 */
0x0000, /* R1395 */
0x0000, /* R1396 */
0x0000, /* R1397 */
0x0000, /* R1398 */
0x0000, /* R1399 */
0x0000, /* R1400 */
0x0000, /* R1401 */
0x0000, /* R1402 */
0x0000, /* R1403 */
0x0000, /* R1404 */
0x0000, /* R1405 */
0x0000, /* R1406 */
0x0000, /* R1407 */
0x6318, /* R1408 - AIF2 EQ Gains (1) */
0x6300, /* R1409 - AIF2 EQ Gains (2) */
0x0FCA, /* R1410 - AIF2 EQ Band 1 A */
0x0400, /* R1411 - AIF2 EQ Band 1 B */
0x00D8, /* R1412 - AIF2 EQ Band 1 PG */
0x1EB5, /* R1413 - AIF2 EQ Band 2 A */
0xF145, /* R1414 - AIF2 EQ Band 2 B */
0x0B75, /* R1415 - AIF2 EQ Band 2 C */
0x01C5, /* R1416 - AIF2 EQ Band 2 PG */
0x1C58, /* R1417 - AIF2 EQ Band 3 A */
0xF373, /* R1418 - AIF2 EQ Band 3 B */
0x0A54, /* R1419 - AIF2 EQ Band 3 C */
0x0558, /* R1420 - AIF2 EQ Band 3 PG */
0x168E, /* R1421 - AIF2 EQ Band 4 A */
0xF829, /* R1422 - AIF2 EQ Band 4 B */
0x07AD, /* R1423 - AIF2 EQ Band 4 C */
0x1103, /* R1424 - AIF2 EQ Band 4 PG */
0x0564, /* R1425 - AIF2 EQ Band 5 A */
0x0559, /* R1426 - AIF2 EQ Band 5 B */
0x4000, /* R1427 - AIF2 EQ Band 5 PG */
0x0000, /* R1428 */
0x0000, /* R1429 */
0x0000, /* R1430 */
0x0000, /* R1431 */
0x0000, /* R1432 */
0x0000, /* R1433 */
0x0000, /* R1434 */
0x0000, /* R1435 */
0x0000, /* R1436 */
0x0000, /* R1437 */
0x0000, /* R1438 */
0x0000, /* R1439 */
0x0000, /* R1440 */
0x0000, /* R1441 */
0x0000, /* R1442 */
0x0000, /* R1443 */
0x0000, /* R1444 */
0x0000, /* R1445 */
0x0000, /* R1446 */
0x0000, /* R1447 */
0x0000, /* R1448 */
0x0000, /* R1449 */
0x0000, /* R1450 */
0x0000, /* R1451 */
0x0000, /* R1452 */
0x0000, /* R1453 */
0x0000, /* R1454 */
0x0000, /* R1455 */
0x0000, /* R1456 */
0x0000, /* R1457 */
0x0000, /* R1458 */
0x0000, /* R1459 */
0x0000, /* R1460 */
0x0000, /* R1461 */
0x0000, /* R1462 */
0x0000, /* R1463 */
0x0000, /* R1464 */
0x0000, /* R1465 */
0x0000, /* R1466 */
0x0000, /* R1467 */
0x0000, /* R1468 */
0x0000, /* R1469 */
0x0000, /* R1470 */
0x0000, /* R1471 */
0x0000, /* R1472 */
0x0000, /* R1473 */
0x0000, /* R1474 */
0x0000, /* R1475 */
0x0000, /* R1476 */
0x0000, /* R1477 */
0x0000, /* R1478 */
0x0000, /* R1479 */
0x0000, /* R1480 */
0x0000, /* R1481 */
0x0000, /* R1482 */
0x0000, /* R1483 */
0x0000, /* R1484 */
0x0000, /* R1485 */
0x0000, /* R1486 */
0x0000, /* R1487 */
0x0000, /* R1488 */
0x0000, /* R1489 */
0x0000, /* R1490 */
0x0000, /* R1491 */
0x0000, /* R1492 */
0x0000, /* R1493 */
0x0000, /* R1494 */
0x0000, /* R1495 */
0x0000, /* R1496 */
0x0000, /* R1497 */
0x0000, /* R1498 */
0x0000, /* R1499 */
0x0000, /* R1500 */
0x0000, /* R1501 */
0x0000, /* R1502 */
0x0000, /* R1503 */
0x0000, /* R1504 */
0x0000, /* R1505 */
0x0000, /* R1506 */
0x0000, /* R1507 */
0x0000, /* R1508 */
0x0000, /* R1509 */
0x0000, /* R1510 */
0x0000, /* R1511 */
0x0000, /* R1512 */
0x0000, /* R1513 */
0x0000, /* R1514 */
0x0000, /* R1515 */
0x0000, /* R1516 */
0x0000, /* R1517 */
0x0000, /* R1518 */
0x0000, /* R1519 */
0x0000, /* R1520 */
0x0000, /* R1521 */
0x0000, /* R1522 */
0x0000, /* R1523 */
0x0000, /* R1524 */
0x0000, /* R1525 */
0x0000, /* R1526 */
0x0000, /* R1527 */
0x0000, /* R1528 */
0x0000, /* R1529 */
0x0000, /* R1530 */
0x0000, /* R1531 */
0x0000, /* R1532 */
0x0000, /* R1533 */
0x0000, /* R1534 */
0x0000, /* R1535 */
0x0000, /* R1536 - DAC1 Mixer Volumes */
0x0000, /* R1537 - DAC1 Left Mixer Routing */
0x0000, /* R1538 - DAC1 Right Mixer Routing */
0x0000, /* R1539 - DAC2 Mixer Volumes */
0x0000, /* R1540 - DAC2 Left Mixer Routing */
0x0000, /* R1541 - DAC2 Right Mixer Routing */
0x0000, /* R1542 - AIF1 ADC1 Left Mixer Routing */
0x0000, /* R1543 - AIF1 ADC1 Right Mixer Routing */
0x0000, /* R1544 - AIF1 ADC2 Left Mixer Routing */
0x0000, /* R1545 - AIF1 ADC2 Right mixer Routing */
0x0000, /* R1546 */
0x0000, /* R1547 */
0x0000, /* R1548 */
0x0000, /* R1549 */
0x0000, /* R1550 */
0x0000, /* R1551 */
0x02C0, /* R1552 - DAC1 Left Volume */
0x02C0, /* R1553 - DAC1 Right Volume */
0x02C0, /* R1554 - DAC2 Left Volume */
0x02C0, /* R1555 - DAC2 Right Volume */
0x0000, /* R1556 - DAC Softmute */
0x0000, /* R1557 */
0x0000, /* R1558 */
0x0000, /* R1559 */
0x0000, /* R1560 */
0x0000, /* R1561 */
0x0000, /* R1562 */
0x0000, /* R1563 */
0x0000, /* R1564 */
0x0000, /* R1565 */
0x0000, /* R1566 */
0x0000, /* R1567 */
0x0002, /* R1568 - Oversampling */
0x0000, /* R1569 - Sidetone */
};
| gpl-2.0 |
Scorpio92/android_kernel_mx2 | fs/ubifs/lpt.c | 2623 | 60108 | /*
* This file is part of UBIFS.
*
* Copyright (C) 2006-2008 Nokia Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Authors: Adrian Hunter
* Artem Bityutskiy (Битюцкий Артём)
*/
/*
* This file implements the LEB properties tree (LPT) area. The LPT area
* contains the LEB properties tree, a table of LPT area eraseblocks (ltab), and
* (for the "big" model) a table of saved LEB numbers (lsave). The LPT area sits
* between the log and the orphan area.
*
* The LPT area is like a miniature self-contained file system. It is required
* that it never runs out of space, is fast to access and update, and scales
* logarithmically. The LEB properties tree is implemented as a wandering tree
* much like the TNC, and the LPT area has its own garbage collection.
*
* The LPT has two slightly different forms called the "small model" and the
* "big model". The small model is used when the entire LEB properties table
* can be written into a single eraseblock. In that case, garbage collection
* consists of just writing the whole table, which therefore makes all other
* eraseblocks reusable. In the case of the big model, dirty eraseblocks are
* selected for garbage collection, which consists of marking the clean nodes in
* that LEB as dirty, and then only the dirty nodes are written out. Also, in
* the case of the big model, a table of LEB numbers is saved so that the entire
 * LPT does not have to be scanned looking for empty eraseblocks when UBIFS is
* mounted.
*/
#include "ubifs.h"
#include <linux/crc16.h>
#include <linux/math64.h>
#include <linux/slab.h>
/**
* do_calc_lpt_geom - calculate sizes for the LPT area.
* @c: the UBIFS file-system description object
*
* Calculate the sizes of LPT bit fields, nodes, and tree, based on the
* properties of the flash and whether LPT is "big" (c->big_lpt).
*/
static void do_calc_lpt_geom(struct ubifs_info *c)
{
	int i, n, bits, per_leb_wastage, max_pnode_cnt;
	long long sz, tot_wastage;
	/*
	 * The main area may later be resized up to c->max_leb_cnt LEBs, so
	 * size the tree for the worst case, not just the current LEB count.
	 */
	n = c->main_lebs + c->max_leb_cnt - c->leb_cnt;
	max_pnode_cnt = DIV_ROUND_UP(n, UBIFS_LPT_FANOUT);
	/*
	 * Tree height: smallest number of levels such that FANOUT^height
	 * leaf slots are enough to reach max_pnode_cnt pnodes.
	 */
	c->lpt_hght = 1;
	n = UBIFS_LPT_FANOUT;
	while (n < max_pnode_cnt) {
		c->lpt_hght += 1;
		n <<= UBIFS_LPT_FANOUT_SHIFT;
	}
	/* One pnode (leaf) describes UBIFS_LPT_FANOUT main-area LEBs */
	c->pnode_cnt = DIV_ROUND_UP(c->main_lebs, UBIFS_LPT_FANOUT);
	/* Sum the internal-node (nnode) count over every level above leaves */
	n = DIV_ROUND_UP(c->pnode_cnt, UBIFS_LPT_FANOUT);
	c->nnode_cnt = n;
	for (i = 1; i < c->lpt_hght; i++) {
		n = DIV_ROUND_UP(n, UBIFS_LPT_FANOUT);
		c->nnode_cnt += n;
	}
	/*
	 * Bit-field widths.  Free/dirty space is recorded in 8-byte units
	 * (ubifs_pack_pnode packs "free >> 3"), hence the "- 3" here.
	 */
	c->space_bits = fls(c->leb_size) - 3;
	c->lpt_lnum_bits = fls(c->lpt_lebs);
	c->lpt_offs_bits = fls(c->leb_size - 1);
	c->lpt_spc_bits = fls(c->leb_size);
	/* Bits needed to number any pnode, and to number any LEB */
	n = DIV_ROUND_UP(c->max_leb_cnt, UBIFS_LPT_FANOUT);
	c->pcnt_bits = fls(n - 1);
	c->lnum_bits = fls(c->max_leb_cnt - 1);
	/*
	 * Node sizes in bytes: CRC + node type + (big model only) node
	 * number + the packed payload, rounded up to whole bytes.
	 * A pnode entry is free + dirty + 1 index-flag bit per LEB.
	 */
	bits = UBIFS_LPT_CRC_BITS + UBIFS_LPT_TYPE_BITS +
		(c->big_lpt ? c->pcnt_bits : 0) +
		(c->space_bits * 2 + 1) * UBIFS_LPT_FANOUT;
	c->pnode_sz = (bits + 7) / 8;
	/* An nnode entry is a (LEB number, offset) pointer per child */
	bits = UBIFS_LPT_CRC_BITS + UBIFS_LPT_TYPE_BITS +
		(c->big_lpt ? c->pcnt_bits : 0) +
		(c->lpt_lnum_bits + c->lpt_offs_bits) * UBIFS_LPT_FANOUT;
	c->nnode_sz = (bits + 7) / 8;
	/* ltab records two space values per LPT LEB */
	bits = UBIFS_LPT_CRC_BITS + UBIFS_LPT_TYPE_BITS +
		c->lpt_lebs * c->lpt_spc_bits * 2;
	c->ltab_sz = (bits + 7) / 8;
	/* lsave is a list of c->lsave_cnt LEB numbers */
	bits = UBIFS_LPT_CRC_BITS + UBIFS_LPT_TYPE_BITS +
		c->lnum_bits * c->lsave_cnt;
	c->lsave_sz = (bits + 7) / 8;
	/* Calculate the minimum LPT size */
	c->lpt_sz = (long long)c->pnode_cnt * c->pnode_sz;
	c->lpt_sz += (long long)c->nnode_cnt * c->nnode_sz;
	c->lpt_sz += c->ltab_sz;
	if (c->big_lpt)
		c->lpt_sz += c->lsave_sz;
	/*
	 * Add wastage: budget one largest-node-size of slack per LPT LEB
	 * (presumably because a node is not split across a LEB boundary --
	 * TODO confirm against the LPT write path), plus the alignment of
	 * the final write up to the minimum I/O unit.
	 */
	sz = c->lpt_sz;
	per_leb_wastage = max_t(int, c->pnode_sz, c->nnode_sz);
	sz += per_leb_wastage;
	tot_wastage = per_leb_wastage;
	while (sz > c->leb_size) {
		sz += per_leb_wastage;
		sz -= c->leb_size;
		tot_wastage += per_leb_wastage;
	}
	tot_wastage += ALIGN(sz, c->min_io_size) - sz;
	c->lpt_sz += tot_wastage;
}
/**
* ubifs_calc_lpt_geom - calculate and check sizes for the LPT area.
* @c: the UBIFS file-system description object
*
* This function returns %0 on success and a negative error code on failure.
*/
int ubifs_calc_lpt_geom(struct ubifs_info *c)
{
	long long needed_bytes;
	int needed_lebs;

	do_calc_lpt_geom(c);

	/* The LPT area must hold at least twice the minimum LPT size */
	needed_bytes = c->lpt_sz * 2;
	needed_lebs = div_u64(needed_bytes + c->leb_size - 1, c->leb_size);
	if (needed_lebs > c->lpt_lebs) {
		ubifs_err("too few LPT LEBs");
		return -EINVAL;
	}

	/* ltab is written as a single node, so it must fit in one LEB */
	if (c->ltab_sz > c->leb_size) {
		ubifs_err("LPT ltab too big");
		return -EINVAL;
	}

	c->check_lpt_free = c->big_lpt;
	return 0;
}
/**
* calc_dflt_lpt_geom - calculate default LPT geometry.
* @c: the UBIFS file-system description object
* @main_lebs: number of main area LEBs is passed and returned here
* @big_lpt: whether the LPT area is "big" is returned here
*
* The size of the LPT area depends on parameters that themselves are dependent
 * on the size of the LPT area. This function successively recalculates the LPT
* area geometry until the parameters and resultant geometry are consistent.
*
* This function returns %0 on success and a negative error code on failure.
*/
static int calc_dflt_lpt_geom(struct ubifs_info *c, int *main_lebs,
			      int *big_lpt)
{
	int iter, needed_lebs;
	long long needed_bytes;

	/* Start from the minimum LPT LEB count and the small LPT model */
	c->lpt_lebs = UBIFS_MIN_LPT_LEBS;
	c->main_lebs = *main_lebs - c->lpt_lebs;
	if (c->main_lebs <= 0)
		return -EINVAL;
	c->big_lpt = 0;

	/* Compute an initial geometry from those assumptions */
	do_calc_lpt_geom(c);

	/* The small model requires the whole LPT to fit into a single LEB */
	if (c->lpt_sz > c->leb_size) {
		/* It does not, so switch to the big model and recompute */
		c->big_lpt = 1;
		do_calc_lpt_geom(c);
	}

	/* Grow lpt_lebs until the geometry is self-consistent (max 64 tries) */
	for (iter = 0; iter < 64; iter++) {
		needed_bytes = c->lpt_sz * 4; /* Allow 4 times the size */
		needed_lebs = div_u64(needed_bytes + c->leb_size - 1,
				      c->leb_size);
		if (needed_lebs > c->lpt_lebs) {
			/* Too few LPT LEBs: enlarge and recalculate */
			c->lpt_lebs = needed_lebs;
			c->main_lebs = *main_lebs - c->lpt_lebs;
			if (c->main_lebs <= 0)
				return -EINVAL;
			do_calc_lpt_geom(c);
			continue;
		}
		/* ltab is a single node and must fit into one LEB */
		if (c->ltab_sz > c->leb_size) {
			ubifs_err("LPT ltab too big");
			return -EINVAL;
		}
		/* Converged: report the resulting geometry to the caller */
		*main_lebs = c->main_lebs;
		*big_lpt = c->big_lpt;
		return 0;
	}
	return -EINVAL;
}
/**
* pack_bits - pack bit fields end-to-end.
* @addr: address at which to pack (passed and next address returned)
* @pos: bit position at which to pack (passed and next position returned)
* @val: value to pack
* @nrbits: number of bits of value to pack (1-32)
*/
static void pack_bits(uint8_t **addr, int *pos, uint32_t val, int nrbits)
{
	uint8_t *p = *addr;
	int b = *pos;
	ubifs_assert(nrbits > 0);
	ubifs_assert(nrbits <= 32);
	ubifs_assert(*pos >= 0);
	ubifs_assert(*pos < 8);
	/* val must fit in nrbits (the shift check is UB-safe for 32) */
	ubifs_assert((val >> nrbits) == 0 || nrbits == 32);
	if (b) {
		/*
		 * The current byte is partially used: OR the low bits of
		 * val into its free high bits, then spill the remainder
		 * into the following bytes.  After "nrbits += b", nrbits
		 * counts bits from the start of *p, so a 32-bit value at
		 * a non-zero bit offset can span up to 5 bytes.
		 */
		*p |= ((uint8_t)val) << b;
		nrbits += b;
		if (nrbits > 8) {
			*++p = (uint8_t)(val >>= (8 - b));
			if (nrbits > 16) {
				*++p = (uint8_t)(val >>= 8);
				if (nrbits > 24) {
					*++p = (uint8_t)(val >>= 8);
					if (nrbits > 32)
						*++p = (uint8_t)(val >>= 8);
				}
			}
		}
	} else {
		/* Byte-aligned: store val little-endian, 8 bits at a time */
		*p = (uint8_t)val;
		if (nrbits > 8) {
			*++p = (uint8_t)(val >>= 8);
			if (nrbits > 16) {
				*++p = (uint8_t)(val >>= 8);
				if (nrbits > 24)
					*++p = (uint8_t)(val >>= 8);
			}
		}
	}
	/*
	 * Return the cursor: p points at the first byte with free bits
	 * (advance past the last byte only if it was completely filled),
	 * and b is the next free bit position within it.
	 */
	b = nrbits & 7;
	if (b == 0)
		p++;
	*addr = p;
	*pos = b;
}
/**
* ubifs_unpack_bits - unpack bit fields.
* @addr: address at which to unpack (passed and next address returned)
* @pos: bit position at which to unpack (passed and next position returned)
* @nrbits: number of bits of value to unpack (1-32)
*
* This functions returns the value unpacked.
*/
uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits)
{
	/* Shift amount used below to mask off bits above nrbits */
	const int k = 32 - nrbits;
	uint8_t *p = *addr;
	int b = *pos;
	uint32_t uninitialized_var(val);
	/* Total bytes the bit field touches, including partial bytes */
	const int bytes = (nrbits + b + 7) >> 3;
	ubifs_assert(nrbits > 0);
	ubifs_assert(nrbits <= 32);
	ubifs_assert(*pos >= 0);
	ubifs_assert(*pos < 8);
	if (b) {
		/*
		 * The field starts mid-byte: assemble the following whole
		 * bytes little-endian first, then shift up and merge in
		 * the top (8 - b) bits of the first byte *p.
		 */
		switch (bytes) {
		case 2:
			val = p[1];
			break;
		case 3:
			val = p[1] | ((uint32_t)p[2] << 8);
			break;
		case 4:
			val = p[1] | ((uint32_t)p[2] << 8) |
			      ((uint32_t)p[3] << 16);
			break;
		case 5:
			val = p[1] | ((uint32_t)p[2] << 8) |
			      ((uint32_t)p[3] << 16) |
			      ((uint32_t)p[4] << 24);
		}
		val <<= (8 - b);
		val |= *p >> b;
		/* From here on, nrbits counts bits from the start of *p */
		nrbits += b;
	} else {
		/* Byte-aligned field: little-endian assembly of 1-4 bytes */
		switch (bytes) {
		case 1:
			val = p[0];
			break;
		case 2:
			val = p[0] | ((uint32_t)p[1] << 8);
			break;
		case 3:
			val = p[0] | ((uint32_t)p[1] << 8) |
			      ((uint32_t)p[2] << 16);
			break;
		case 4:
			val = p[0] | ((uint32_t)p[1] << 8) |
			      ((uint32_t)p[2] << 16) |
			      ((uint32_t)p[3] << 24);
			break;
		}
	}
	/* Discard any stray bits above nrbits (no-op when nrbits == 32) */
	val <<= k;
	val >>= k;
	/* Advance the byte/bit cursor past the unpacked field */
	b = nrbits & 7;
	p += nrbits >> 3;
	*addr = p;
	*pos = b;
	ubifs_assert((val >> nrbits) == 0 || nrbits - b == 32);
	return val;
}
/**
* ubifs_pack_pnode - pack all the bit fields of a pnode.
* @c: UBIFS file-system description object
* @buf: buffer into which to pack
* @pnode: pnode to pack
*/
void ubifs_pack_pnode(struct ubifs_info *c, void *buf,
struct ubifs_pnode *pnode)
{
uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
int i, pos = 0;
uint16_t crc;
pack_bits(&addr, &pos, UBIFS_LPT_PNODE, UBIFS_LPT_TYPE_BITS);
if (c->big_lpt)
pack_bits(&addr, &pos, pnode->num, c->pcnt_bits);
for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
pack_bits(&addr, &pos, pnode->lprops[i].free >> 3,
c->space_bits);
pack_bits(&addr, &pos, pnode->lprops[i].dirty >> 3,
c->space_bits);
if (pnode->lprops[i].flags & LPROPS_INDEX)
pack_bits(&addr, &pos, 1, 1);
else
pack_bits(&addr, &pos, 0, 1);
}
crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
c->pnode_sz - UBIFS_LPT_CRC_BYTES);
addr = buf;
pos = 0;
pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS);
}
/**
 * ubifs_pack_nnode - pack all the bit fields of a nnode.
 * @c: UBIFS file-system description object
 * @buf: buffer into which to pack
 * @nnode: nnode to pack
 *
 * Branch LEB numbers are stored relative to @c->lpt_first; an unused branch
 * (lnum of zero) is encoded as the out-of-range value @c->lpt_last + 1.
 */
void ubifs_pack_nnode(struct ubifs_info *c, void *buf,
		      struct ubifs_nnode *nnode)
{
	uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
	int pos = 0, i;
	uint16_t crc;

	pack_bits(&addr, &pos, UBIFS_LPT_NNODE, UBIFS_LPT_TYPE_BITS);
	if (c->big_lpt)
		pack_bits(&addr, &pos, nnode->num, c->pcnt_bits);
	for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
		int lnum = nnode->nbranch[i].lnum;

		/* Encode "no branch" as one past the last LPT LEB */
		lnum = lnum ? lnum : c->lpt_last + 1;
		pack_bits(&addr, &pos, lnum - c->lpt_first, c->lpt_lnum_bits);
		pack_bits(&addr, &pos, nnode->nbranch[i].offs,
			  c->lpt_offs_bits);
	}
	crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
		    c->nnode_sz - UBIFS_LPT_CRC_BYTES);
	addr = buf;
	pos = 0;
	pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS);
}
/**
* ubifs_pack_ltab - pack the LPT's own lprops table.
* @c: UBIFS file-system description object
* @buf: buffer into which to pack
* @ltab: LPT's own lprops table to pack
*/
void ubifs_pack_ltab(struct ubifs_info *c, void *buf,
struct ubifs_lpt_lprops *ltab)
{
uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
int i, pos = 0;
uint16_t crc;
pack_bits(&addr, &pos, UBIFS_LPT_LTAB, UBIFS_LPT_TYPE_BITS);
for (i = 0; i < c->lpt_lebs; i++) {
pack_bits(&addr, &pos, ltab[i].free, c->lpt_spc_bits);
pack_bits(&addr, &pos, ltab[i].dirty, c->lpt_spc_bits);
}
crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
c->ltab_sz - UBIFS_LPT_CRC_BYTES);
addr = buf;
pos = 0;
pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS);
}
/**
 * ubifs_pack_lsave - pack the LPT's save table.
 * @c: UBIFS file-system description object
 * @buf: buffer into which to pack
 * @lsave: LPT's save table to pack
 *
 * Each entry is a main-area LEB number packed in @c->lnum_bits bits; the CRC
 * goes into the reserved bytes at the start of @buf.
 */
void ubifs_pack_lsave(struct ubifs_info *c, void *buf, int *lsave)
{
	uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
	int pos = 0, n;
	uint16_t crc;

	pack_bits(&addr, &pos, UBIFS_LPT_LSAVE, UBIFS_LPT_TYPE_BITS);
	for (n = 0; n < c->lsave_cnt; n++)
		pack_bits(&addr, &pos, lsave[n], c->lnum_bits);
	crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
		    c->lsave_sz - UBIFS_LPT_CRC_BYTES);
	addr = buf;
	pos = 0;
	pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS);
}
/**
 * ubifs_add_lpt_dirt - add dirty space to LPT LEB properties.
 * @c: UBIFS file-system description object
 * @lnum: LEB number to which to add dirty space (%0 is ignored)
 * @dirty: amount of dirty space to add (%0 is ignored)
 */
void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty)
{
	if (!dirty || !lnum)
		return;
	/*
	 * Validate @lnum before using it to index c->ltab - previously the
	 * debug print below ran first, so a bad LEB number caused an
	 * out-of-bounds read before the assertion could flag it.
	 */
	ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last);
	dbg_lp("LEB %d add %d to %d",
	       lnum, dirty, c->ltab[lnum - c->lpt_first].dirty);
	c->ltab[lnum - c->lpt_first].dirty += dirty;
}
/**
 * set_ltab - set LPT LEB properties.
 * @c: UBIFS file-system description object
 * @lnum: LEB number (must lie within the LPT area)
 * @free: amount of free space
 * @dirty: amount of dirty space
 */
static void set_ltab(struct ubifs_info *c, int lnum, int free, int dirty)
{
	/*
	 * Validate @lnum before using it to index c->ltab - previously the
	 * debug print below ran first, so a bad LEB number caused an
	 * out-of-bounds read before the assertion could flag it.
	 */
	ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last);
	dbg_lp("LEB %d free %d dirty %d to %d %d",
	       lnum, c->ltab[lnum - c->lpt_first].free,
	       c->ltab[lnum - c->lpt_first].dirty, free, dirty);
	c->ltab[lnum - c->lpt_first].free = free;
	c->ltab[lnum - c->lpt_first].dirty = dirty;
}
/**
 * ubifs_add_nnode_dirt - add dirty space to LPT LEB properties.
 * @c: UBIFS file-system description object
 * @nnode: nnode for which to add dirt
 *
 * Dirt is charged to the LEB holding this nnode: the parent's branch LEB for
 * a child nnode, or the LPT root location for the root nnode.
 */
void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode)
{
	struct ubifs_nnode *np = nnode->parent;

	if (np) {
		ubifs_add_lpt_dirt(c, np->nbranch[nnode->iip].lnum,
				   c->nnode_sz);
		return;
	}
	/* Root nnode: dirty the LPT root LEB and, once, the ltab node too */
	ubifs_add_lpt_dirt(c, c->lpt_lnum, c->nnode_sz);
	if (!(c->lpt_drty_flgs & LTAB_DIRTY)) {
		c->lpt_drty_flgs |= LTAB_DIRTY;
		ubifs_add_lpt_dirt(c, c->ltab_lnum, c->ltab_sz);
	}
}
/**
 * add_pnode_dirt - add dirty space to LPT LEB properties.
 * @c: UBIFS file-system description object
 * @pnode: pnode for which to add dirt
 *
 * Dirt is charged to the LEB recorded in the parent's branch for this pnode.
 */
static void add_pnode_dirt(struct ubifs_info *c, struct ubifs_pnode *pnode)
{
	int lnum = pnode->parent->nbranch[pnode->iip].lnum;

	ubifs_add_lpt_dirt(c, lnum, c->pnode_sz);
}
/**
 * calc_nnode_num - calculate nnode number.
 * @row: the row in the tree (root is zero)
 * @col: the column in the row (leftmost is zero)
 *
 * The nnode number is a number that uniquely identifies a nnode and can be
 * used easily to traverse the tree from the root to that nnode: reading it
 * from the top, each group of fan-out bits selects the branch to follow.
 *
 * This function calculates and returns the nnode number for the nnode at
 * @row and @col.
 */
static int calc_nnode_num(int row, int col)
{
	int num = 1;

	for (; row > 0; row--) {
		/* Push the next fan-out digit of @col onto @num */
		num = (num << UBIFS_LPT_FANOUT_SHIFT) |
		      (col & (UBIFS_LPT_FANOUT - 1));
		col >>= UBIFS_LPT_FANOUT_SHIFT;
	}
	return num;
}
/**
 * calc_nnode_num_from_parent - calculate nnode number.
 * @c: UBIFS file-system description object
 * @parent: parent nnode (%NULL means the root, whose number is %1)
 * @iip: index in parent
 *
 * The nnode number is a number that uniquely identifies a nnode and can be
 * used easily to traverse the tree from the root to that nnode.
 *
 * This function calculates and returns the nnode number based on the parent's
 * nnode number and the index in parent.
 */
static int calc_nnode_num_from_parent(const struct ubifs_info *c,
				      struct ubifs_nnode *parent, int iip)
{
	int shft;

	if (!parent)
		return 1;
	/* Bit position of this level's fan-out digit */
	shft = (c->lpt_hght - parent->level) * UBIFS_LPT_FANOUT_SHIFT;
	return (parent->num ^ (1 << shft)) |
	       ((UBIFS_LPT_FANOUT + iip) << shft);
}
/**
 * calc_pnode_num_from_parent - calculate pnode number.
 * @c: UBIFS file-system description object
 * @parent: parent nnode
 * @iip: index in parent
 *
 * The pnode number is a number that uniquely identifies a pnode and can be
 * used easily to traverse the tree from the root to that pnode.
 *
 * This function calculates and returns the pnode number based on the parent's
 * nnode number and the index in parent.
 */
static int calc_pnode_num_from_parent(const struct ubifs_info *c,
				      struct ubifs_nnode *parent, int iip)
{
	int lvl, pnum = parent->num, num = 0;

	/* Reverse the fan-out digits of the parent's number */
	for (lvl = c->lpt_hght - 1; lvl > 0; lvl--) {
		num = (num << UBIFS_LPT_FANOUT_SHIFT) |
		      (pnum & (UBIFS_LPT_FANOUT - 1));
		pnum >>= UBIFS_LPT_FANOUT_SHIFT;
	}
	/* The pnode's own digit is the index in its parent */
	return (num << UBIFS_LPT_FANOUT_SHIFT) | iip;
}
/**
 * ubifs_create_dflt_lpt - create default LPT.
 * @c: UBIFS file-system description object
 * @main_lebs: number of main area LEBs is passed and returned here
 * @lpt_first: LEB number of first LPT LEB
 * @lpt_lebs: number of LEBs for LPT is passed and returned here
 * @big_lpt: use big LPT model is passed and returned here
 *
 * Builds the entire default LPT on flash: all pnodes, then all nnode levels
 * bottom-up, then (for big LPT) the lsave table, and finally the ltab.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
			  int *lpt_lebs, int *big_lpt)
{
	int lnum, err = 0, node_sz, iopos, i, j, cnt, len, alen, row;
	int blnum, boffs, bsz, bcnt;
	struct ubifs_pnode *pnode = NULL;
	struct ubifs_nnode *nnode = NULL;
	void *buf = NULL, *p;
	struct ubifs_lpt_lprops *ltab = NULL;
	int *lsave = NULL;

	err = calc_dflt_lpt_geom(c, main_lebs, big_lpt);
	if (err)
		return err;
	*lpt_lebs = c->lpt_lebs;

	/* Needed by 'ubifs_pack_nnode()' and 'set_ltab()' */
	c->lpt_first = lpt_first;
	/* Needed by 'set_ltab()' */
	c->lpt_last = lpt_first + c->lpt_lebs - 1;
	/* Needed by 'ubifs_pack_lsave()' */
	c->main_first = c->leb_cnt - *main_lebs;

	/* Allocate working structures; all are freed on the 'out' path */
	lsave = kmalloc(sizeof(int) * c->lsave_cnt, GFP_KERNEL);
	pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_KERNEL);
	nnode = kzalloc(sizeof(struct ubifs_nnode), GFP_KERNEL);
	buf = vmalloc(c->leb_size);
	ltab = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
	if (!pnode || !nnode || !buf || !ltab || !lsave) {
		err = -ENOMEM;
		goto out;
	}

	ubifs_assert(!c->ltab);
	c->ltab = ltab; /* Needed by set_ltab */

	/* Initialize LPT's own lprops */
	for (i = 0; i < c->lpt_lebs; i++) {
		ltab[i].free = c->leb_size;
		ltab[i].dirty = 0;
		ltab[i].tgc = 0;
		ltab[i].cmt = 0;
	}

	lnum = lpt_first;
	p = buf;
	/* Number of leaf nodes (pnodes) */
	cnt = c->pnode_cnt;

	/*
	 * The first pnode contains the LEB properties for the LEBs that contain
	 * the root inode node and the root index node of the index tree.
	 */
	node_sz = ALIGN(ubifs_idx_node_sz(c, 1), 8);
	iopos = ALIGN(node_sz, c->min_io_size);
	pnode->lprops[0].free = c->leb_size - iopos;
	pnode->lprops[0].dirty = iopos - node_sz;
	pnode->lprops[0].flags = LPROPS_INDEX;

	node_sz = UBIFS_INO_NODE_SZ;
	iopos = ALIGN(node_sz, c->min_io_size);
	pnode->lprops[1].free = c->leb_size - iopos;
	pnode->lprops[1].dirty = iopos - node_sz;

	for (i = 2; i < UBIFS_LPT_FANOUT; i++)
		pnode->lprops[i].free = c->leb_size;

	/* Add first pnode */
	ubifs_pack_pnode(c, p, pnode);
	p += c->pnode_sz;
	len = c->pnode_sz;
	pnode->num += 1;

	/* Reset pnode values for remaining pnodes */
	pnode->lprops[0].free = c->leb_size;
	pnode->lprops[0].dirty = 0;
	pnode->lprops[0].flags = 0;

	pnode->lprops[1].free = c->leb_size;
	pnode->lprops[1].dirty = 0;

	/*
	 * To calculate the internal node branches, we keep information about
	 * the level below.
	 */
	blnum = lnum; /* LEB number of level below */
	boffs = 0; /* Offset of level below */
	bcnt = cnt; /* Number of nodes in level below */
	bsz = c->pnode_sz; /* Size of nodes in level below */

	/* Add all remaining pnodes */
	for (i = 1; i < cnt; i++) {
		/* Flush the buffer to flash when the next node won't fit */
		if (len + c->pnode_sz > c->leb_size) {
			alen = ALIGN(len, c->min_io_size);
			set_ltab(c, lnum, c->leb_size - alen, alen - len);
			memset(p, 0xff, alen - len);
			err = ubi_leb_change(c->ubi, lnum++, buf, alen,
					     UBI_SHORTTERM);
			if (err)
				goto out;
			p = buf;
			len = 0;
		}
		ubifs_pack_pnode(c, p, pnode);
		p += c->pnode_sz;
		len += c->pnode_sz;
		/*
		 * pnodes are simply numbered left to right starting at zero,
		 * which means the pnode number can be used easily to traverse
		 * down the tree to the corresponding pnode.
		 */
		pnode->num += 1;
	}

	/* Tree height above the pnode level */
	row = 0;
	for (i = UBIFS_LPT_FANOUT; cnt > i; i <<= UBIFS_LPT_FANOUT_SHIFT)
		row += 1;
	/* Add all nnodes, one level at a time */
	while (1) {
		/* Number of internal nodes (nnodes) at next level */
		cnt = DIV_ROUND_UP(cnt, UBIFS_LPT_FANOUT);
		for (i = 0; i < cnt; i++) {
			if (len + c->nnode_sz > c->leb_size) {
				alen = ALIGN(len, c->min_io_size);
				set_ltab(c, lnum, c->leb_size - alen,
					    alen - len);
				memset(p, 0xff, alen - len);
				err = ubi_leb_change(c->ubi, lnum++, buf, alen,
						     UBI_SHORTTERM);
				if (err)
					goto out;
				p = buf;
				len = 0;
			}
			/* Only 1 nnode at this level, so it is the root */
			if (cnt == 1) {
				c->lpt_lnum = lnum;
				c->lpt_offs = len;
			}
			/* Set branches to the level below */
			for (j = 0; j < UBIFS_LPT_FANOUT; j++) {
				if (bcnt) {
					if (boffs + bsz > c->leb_size) {
						blnum += 1;
						boffs = 0;
					}
					nnode->nbranch[j].lnum = blnum;
					nnode->nbranch[j].offs = boffs;
					boffs += bsz;
					bcnt--;
				} else {
					/* Levels below exhausted: empty branch */
					nnode->nbranch[j].lnum = 0;
					nnode->nbranch[j].offs = 0;
				}
			}
			nnode->num = calc_nnode_num(row, i);
			ubifs_pack_nnode(c, p, nnode);
			p += c->nnode_sz;
			len += c->nnode_sz;
		}
		/* Only 1 nnode at this level, so it is the root */
		if (cnt == 1)
			break;
		/* Update the information about the level below */
		bcnt = cnt;
		bsz = c->nnode_sz;
		row -= 1;
	}

	if (*big_lpt) {
		/* Need to add LPT's save table */
		if (len + c->lsave_sz > c->leb_size) {
			alen = ALIGN(len, c->min_io_size);
			set_ltab(c, lnum, c->leb_size - alen, alen - len);
			memset(p, 0xff, alen - len);
			err = ubi_leb_change(c->ubi, lnum++, buf, alen,
					     UBI_SHORTTERM);
			if (err)
				goto out;
			p = buf;
			len = 0;
		}

		c->lsave_lnum = lnum;
		c->lsave_offs = len;

		/* Fill lsave with the first main-area LEBs, padding with
		   main_first when lsave_cnt exceeds *main_lebs */
		for (i = 0; i < c->lsave_cnt && i < *main_lebs; i++)
			lsave[i] = c->main_first + i;
		for (; i < c->lsave_cnt; i++)
			lsave[i] = c->main_first;

		ubifs_pack_lsave(c, p, lsave);
		p += c->lsave_sz;
		len += c->lsave_sz;
	}

	/* Need to add LPT's own LEB properties table */
	if (len + c->ltab_sz > c->leb_size) {
		alen = ALIGN(len, c->min_io_size);
		set_ltab(c, lnum, c->leb_size - alen, alen - len);
		memset(p, 0xff, alen - len);
		err = ubi_leb_change(c->ubi, lnum++, buf, alen, UBI_SHORTTERM);
		if (err)
			goto out;
		p = buf;
		len = 0;
	}

	c->ltab_lnum = lnum;
	c->ltab_offs = len;

	/* Update ltab before packing it */
	len += c->ltab_sz;
	alen = ALIGN(len, c->min_io_size);
	set_ltab(c, lnum, c->leb_size - alen, alen - len);

	ubifs_pack_ltab(c, p, ltab);
	p += c->ltab_sz;

	/* Write remaining buffer */
	memset(p, 0xff, alen - len);
	err = ubi_leb_change(c->ubi, lnum, buf, alen, UBI_SHORTTERM);
	if (err)
		goto out;

	c->nhead_lnum = lnum;
	c->nhead_offs = ALIGN(len, c->min_io_size);

	dbg_lp("space_bits %d", c->space_bits);
	dbg_lp("lpt_lnum_bits %d", c->lpt_lnum_bits);
	dbg_lp("lpt_offs_bits %d", c->lpt_offs_bits);
	dbg_lp("lpt_spc_bits %d", c->lpt_spc_bits);
	dbg_lp("pcnt_bits %d", c->pcnt_bits);
	dbg_lp("lnum_bits %d", c->lnum_bits);
	dbg_lp("pnode_sz %d", c->pnode_sz);
	dbg_lp("nnode_sz %d", c->nnode_sz);
	dbg_lp("ltab_sz %d", c->ltab_sz);
	dbg_lp("lsave_sz %d", c->lsave_sz);
	dbg_lp("lsave_cnt %d", c->lsave_cnt);
	dbg_lp("lpt_hght %d", c->lpt_hght);
	dbg_lp("big_lpt %d", c->big_lpt);
	dbg_lp("LPT root is at %d:%d", c->lpt_lnum, c->lpt_offs);
	dbg_lp("LPT head is at %d:%d", c->nhead_lnum, c->nhead_offs);
	dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs);
	if (c->big_lpt)
		dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs);
out:
	/* c->ltab was only borrowed for set_ltab() during creation */
	c->ltab = NULL;
	kfree(lsave);
	vfree(ltab);
	vfree(buf);
	kfree(nnode);
	kfree(pnode);
	return err;
}
/**
* update_cats - add LEB properties of a pnode to LEB category lists and heaps.
* @c: UBIFS file-system description object
* @pnode: pnode
*
* When a pnode is loaded into memory, the LEB properties it contains are added,
* by this function, to the LEB category lists and heaps.
*/
static void update_cats(struct ubifs_info *c, struct ubifs_pnode *pnode)
{
int i;
for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
int cat = pnode->lprops[i].flags & LPROPS_CAT_MASK;
int lnum = pnode->lprops[i].lnum;
if (!lnum)
return;
ubifs_add_to_cat(c, &pnode->lprops[i], cat);
}
}
/**
 * replace_cats - add LEB properties of a pnode to LEB category lists and heaps.
 * @c: UBIFS file-system description object
 * @old_pnode: pnode copied
 * @new_pnode: pnode copy
 *
 * During commit it is sometimes necessary to copy a pnode
 * (see dirty_cow_pnode). When that happens, references in
 * category lists and heaps must be replaced. This function does that.
 * A zero LEB number marks the end of the valid entries.
 */
static void replace_cats(struct ubifs_info *c, struct ubifs_pnode *old_pnode,
			 struct ubifs_pnode *new_pnode)
{
	int i;

	for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
		if (!new_pnode->lprops[i].lnum)
			break;
		ubifs_replace_cat(c, &old_pnode->lprops[i],
				  &new_pnode->lprops[i]);
	}
}
/**
 * check_lpt_crc - check LPT node crc is correct.
 * @buf: buffer containing node
 * @len: length of node
 *
 * The stored CRC occupies the first %UBIFS_LPT_CRC_BITS of @buf; it is
 * compared against a crc16 computed over the rest of the node.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int check_lpt_crc(void *buf, int len)
{
	int pos = 0;
	uint8_t *addr = buf;
	uint16_t crc, calc_crc;

	/* Unpack the CRC stored at the start of the node */
	crc = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_CRC_BITS);
	calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
			 len - UBIFS_LPT_CRC_BYTES);
	if (crc != calc_crc) {
		ubifs_err("invalid crc in LPT node: crc %hx calc %hx", crc,
			  calc_crc);
		dbg_dump_stack();
		return -EINVAL;
	}
	return 0;
}
/**
 * check_lpt_type - check LPT node type is correct.
 * @addr: address of type bit field is passed and returned updated here
 * @pos: position of type bit field is passed and returned updated here
 * @type: expected type
 *
 * On success @addr and @pos are advanced past the type field so the caller
 * can continue unpacking the node body.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int check_lpt_type(uint8_t **addr, int *pos, int type)
{
	int node_type;

	node_type = ubifs_unpack_bits(addr, pos, UBIFS_LPT_TYPE_BITS);
	if (node_type != type) {
		ubifs_err("invalid type (%d) in LPT node type %d", node_type,
			  type);
		dbg_dump_stack();
		return -EINVAL;
	}
	return 0;
}
/**
 * unpack_pnode - unpack a pnode.
 * @c: UBIFS file-system description object
 * @buf: buffer containing packed pnode to unpack
 * @pnode: pnode structure to fill
 *
 * Inverse of ubifs_pack_pnode(): verifies the node type, unpacks the LEB
 * properties of each slot, and finally checks the CRC.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int unpack_pnode(const struct ubifs_info *c, void *buf,
			struct ubifs_pnode *pnode)
{
	uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
	int i, pos = 0, err;

	err = check_lpt_type(&addr, &pos, UBIFS_LPT_PNODE);
	if (err)
		return err;
	if (c->big_lpt)
		pnode->num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits);
	for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
		struct ubifs_lprops * const lprops = &pnode->lprops[i];

		/* Space amounts were packed divided by 8 - scale back up */
		lprops->free = ubifs_unpack_bits(&addr, &pos, c->space_bits);
		lprops->free <<= 3;
		lprops->dirty = ubifs_unpack_bits(&addr, &pos, c->space_bits);
		lprops->dirty <<= 3;

		/* Single bit records whether the LEB holds index nodes */
		if (ubifs_unpack_bits(&addr, &pos, 1))
			lprops->flags = LPROPS_INDEX;
		else
			lprops->flags = 0;
		lprops->flags |= ubifs_categorize_lprops(c, lprops);
	}
	err = check_lpt_crc(buf, c->pnode_sz);
	return err;
}
/**
 * ubifs_unpack_nnode - unpack a nnode.
 * @c: UBIFS file-system description object
 * @buf: buffer containing packed nnode to unpack
 * @nnode: nnode structure to fill
 *
 * Inverse of ubifs_pack_nnode(): verifies the node type, unpacks the branch
 * locations, and finally checks the CRC.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf,
		       struct ubifs_nnode *nnode)
{
	uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
	int i, pos = 0, err;

	err = check_lpt_type(&addr, &pos, UBIFS_LPT_NNODE);
	if (err)
		return err;
	if (c->big_lpt)
		nnode->num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits);
	for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
		int lnum;

		/* LEB numbers are stored relative to the first LPT LEB */
		lnum = ubifs_unpack_bits(&addr, &pos, c->lpt_lnum_bits) +
		       c->lpt_first;
		/* "lpt_last + 1" is the encoding for an empty branch */
		if (lnum == c->lpt_last + 1)
			lnum = 0;
		nnode->nbranch[i].lnum = lnum;
		nnode->nbranch[i].offs = ubifs_unpack_bits(&addr, &pos,
						c->lpt_offs_bits);
	}
	err = check_lpt_crc(buf, c->nnode_sz);
	return err;
}
/**
 * unpack_ltab - unpack the LPT's own lprops table.
 * @c: UBIFS file-system description object
 * @buf: buffer from which to unpack
 *
 * Inverse of ubifs_pack_ltab(). Each entry's free and dirty values are
 * range-checked against the LEB size before being stored in @c->ltab.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int unpack_ltab(const struct ubifs_info *c, void *buf)
{
	uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
	int i, pos = 0, err;

	err = check_lpt_type(&addr, &pos, UBIFS_LPT_LTAB);
	if (err)
		return err;
	for (i = 0; i < c->lpt_lebs; i++) {
		int free = ubifs_unpack_bits(&addr, &pos, c->lpt_spc_bits);
		int dirty = ubifs_unpack_bits(&addr, &pos, c->lpt_spc_bits);

		/* Reject corrupt entries before they reach c->ltab */
		if (free < 0 || free > c->leb_size || dirty < 0 ||
		    dirty > c->leb_size || free + dirty > c->leb_size)
			return -EINVAL;

		c->ltab[i].free = free;
		c->ltab[i].dirty = dirty;
		c->ltab[i].tgc = 0;
		c->ltab[i].cmt = 0;
	}
	err = check_lpt_crc(buf, c->ltab_sz);
	return err;
}
/**
 * unpack_lsave - unpack the LPT's save table.
 * @c: UBIFS file-system description object
 * @buf: buffer from which to unpack
 *
 * Inverse of ubifs_pack_lsave(). Each LEB number is checked to lie within
 * the main area before being stored in @c->lsave.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int unpack_lsave(const struct ubifs_info *c, void *buf)
{
	uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
	int i, pos = 0, err;

	err = check_lpt_type(&addr, &pos, UBIFS_LPT_LSAVE);
	if (err)
		return err;
	for (i = 0; i < c->lsave_cnt; i++) {
		int lnum = ubifs_unpack_bits(&addr, &pos, c->lnum_bits);

		/* lsave entries must be main-area LEBs */
		if (lnum < c->main_first || lnum >= c->leb_cnt)
			return -EINVAL;
		c->lsave[i] = lnum;
	}
	err = check_lpt_crc(buf, c->lsave_sz);
	return err;
}
/**
 * validate_nnode - validate a nnode.
 * @c: UBIFS file-system description object
 * @nnode: nnode to validate
 * @parent: parent nnode (or NULL for the root nnode)
 * @iip: index in parent
 *
 * Checks the node number (big LPT only), the level implied by the parent,
 * and that every branch location lies within the LPT area.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int validate_nnode(const struct ubifs_info *c, struct ubifs_nnode *nnode,
			  struct ubifs_nnode *parent, int iip)
{
	int i, lvl, max_offs;

	if (c->big_lpt) {
		/* Stored node number must match its position in the tree */
		int num = calc_nnode_num_from_parent(c, parent, iip);

		if (nnode->num != num)
			return -EINVAL;
	}
	/* The root is at the LPT height; each level down decrements by one */
	lvl = parent ? parent->level - 1 : c->lpt_hght;
	if (lvl < 1)
		return -EINVAL;
	/* Level 1 nnodes point at pnodes; higher levels point at nnodes */
	if (lvl == 1)
		max_offs = c->leb_size - c->pnode_sz;
	else
		max_offs = c->leb_size - c->nnode_sz;
	for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
		int lnum = nnode->nbranch[i].lnum;
		int offs = nnode->nbranch[i].offs;

		/* lnum of zero means an empty (never-written) branch */
		if (lnum == 0) {
			if (offs != 0)
				return -EINVAL;
			continue;
		}
		if (lnum < c->lpt_first || lnum > c->lpt_last)
			return -EINVAL;
		if (offs < 0 || offs > max_offs)
			return -EINVAL;
	}
	return 0;
}
/**
 * validate_pnode - validate a pnode.
 * @c: UBIFS file-system description object
 * @pnode: pnode to validate
 * @parent: parent nnode
 * @iip: index in parent
 *
 * Checks the node number (big LPT only) and that each slot's free and dirty
 * amounts are sane with respect to the LEB size and required alignment.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int validate_pnode(const struct ubifs_info *c, struct ubifs_pnode *pnode,
			  struct ubifs_nnode *parent, int iip)
{
	int i;

	if (c->big_lpt) {
		/* Stored node number must match its position in the tree */
		int num = calc_pnode_num_from_parent(c, parent, iip);

		if (pnode->num != num)
			return -EINVAL;
	}
	for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
		int free = pnode->lprops[i].free;
		int dirty = pnode->lprops[i].dirty;

		/* Free space must be min-I/O aligned and a multiple of 8
		   (it was packed divided by 8) */
		if (free < 0 || free > c->leb_size || free % c->min_io_size ||
		    (free & 7))
			return -EINVAL;
		if (dirty < 0 || dirty > c->leb_size || (dirty & 7))
			return -EINVAL;
		if (dirty + free > c->leb_size)
			return -EINVAL;
	}
	return 0;
}
/**
 * set_pnode_lnum - set LEB numbers on a pnode.
 * @c: UBIFS file-system description object
 * @pnode: pnode to update
 *
 * This function calculates the LEB numbers for the LEB properties it contains
 * based on the pnode number. Slots that would fall past the end of the volume
 * are left untouched.
 */
static void set_pnode_lnum(const struct ubifs_info *c,
			   struct ubifs_pnode *pnode)
{
	int i, lnum;

	/* Each pnode covers a consecutive run of UBIFS_LPT_FANOUT LEBs */
	lnum = (pnode->num << UBIFS_LPT_FANOUT_SHIFT) + c->main_first;
	for (i = 0; i < UBIFS_LPT_FANOUT && lnum < c->leb_cnt; i++)
		pnode->lprops[i].lnum = lnum++;
}
/**
 * ubifs_read_nnode - read a nnode from flash and link it to the tree in memory.
 * @c: UBIFS file-system description object
 * @parent: parent nnode (or NULL for the root)
 * @iip: index in parent
 *
 * On success the new nnode is attached to @parent's branch (or becomes
 * @c->nroot) and its level, parent and iip fields are filled in.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
{
	struct ubifs_nbranch *branch = NULL;
	struct ubifs_nnode *nnode = NULL;
	/* Shared scratch buffer - callers are expected to serialize access */
	void *buf = c->lpt_nod_buf;
	int err, lnum, offs;

	if (parent) {
		branch = &parent->nbranch[iip];
		lnum = branch->lnum;
		offs = branch->offs;
	} else {
		/* No parent: read the root nnode from the LPT root location */
		lnum = c->lpt_lnum;
		offs = c->lpt_offs;
	}
	nnode = kzalloc(sizeof(struct ubifs_nnode), GFP_NOFS);
	if (!nnode) {
		err = -ENOMEM;
		goto out;
	}
	if (lnum == 0) {
		/*
		 * This nnode was not written which just means that the LEB
		 * properties in the subtree below it describe empty LEBs. We
		 * make the nnode as though we had read it, which in fact means
		 * doing almost nothing.
		 */
		if (c->big_lpt)
			nnode->num = calc_nnode_num_from_parent(c, parent, iip);
	} else {
		err = ubi_read(c->ubi, lnum, buf, offs, c->nnode_sz);
		if (err)
			goto out;
		err = ubifs_unpack_nnode(c, buf, nnode);
		if (err)
			goto out;
	}
	err = validate_nnode(c, nnode, parent, iip);
	if (err)
		goto out;
	/* Small LPT model does not store node numbers - compute it here */
	if (!c->big_lpt)
		nnode->num = calc_nnode_num_from_parent(c, parent, iip);
	if (parent) {
		branch->nnode = nnode;
		nnode->level = parent->level - 1;
	} else {
		c->nroot = nnode;
		nnode->level = c->lpt_hght;
	}
	nnode->parent = parent;
	nnode->iip = iip;
	return 0;

out:
	ubifs_err("error %d reading nnode at %d:%d", err, lnum, offs);
	kfree(nnode);
	return err;
}
/**
 * read_pnode - read a pnode from flash and link it to the tree in memory.
 * @c: UBIFS file-system description object
 * @parent: parent nnode
 * @iip: index in parent
 *
 * On success the new pnode is attached to @parent's branch, its parent/iip
 * fields are filled in, its LEB numbers are computed, and @c->pnodes_have is
 * incremented.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
{
	struct ubifs_nbranch *branch;
	struct ubifs_pnode *pnode = NULL;
	/* Shared scratch buffer - callers are expected to serialize access */
	void *buf = c->lpt_nod_buf;
	int err, lnum, offs;

	branch = &parent->nbranch[iip];
	lnum = branch->lnum;
	offs = branch->offs;
	pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_NOFS);
	if (!pnode)
		return -ENOMEM;
	if (lnum == 0) {
		/*
		 * This pnode was not written which just means that the LEB
		 * properties in it describe empty LEBs. We make the pnode as
		 * though we had read it.
		 */
		int i;

		if (c->big_lpt)
			pnode->num = calc_pnode_num_from_parent(c, parent, iip);
		for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
			struct ubifs_lprops * const lprops = &pnode->lprops[i];

			lprops->free = c->leb_size;
			lprops->flags = ubifs_categorize_lprops(c, lprops);
		}
	} else {
		err = ubi_read(c->ubi, lnum, buf, offs, c->pnode_sz);
		if (err)
			goto out;
		err = unpack_pnode(c, buf, pnode);
		if (err)
			goto out;
	}
	err = validate_pnode(c, pnode, parent, iip);
	if (err)
		goto out;
	/* Small LPT model does not store node numbers - compute it here */
	if (!c->big_lpt)
		pnode->num = calc_pnode_num_from_parent(c, parent, iip);
	branch->pnode = pnode;
	pnode->parent = parent;
	pnode->iip = iip;
	set_pnode_lnum(c, pnode);
	c->pnodes_have += 1;
	return 0;

out:
	ubifs_err("error %d reading pnode at %d:%d", err, lnum, offs);
	dbg_dump_pnode(c, pnode, parent, iip);
	dbg_msg("calc num: %d", calc_pnode_num_from_parent(c, parent, iip));
	kfree(pnode);
	return err;
}
/**
 * read_ltab - read LPT's own lprops table.
 * @c: UBIFS file-system description object
 *
 * Reads the packed ltab node from its recorded location and unpacks it into
 * @c->ltab.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int read_ltab(struct ubifs_info *c)
{
	void *buf = vmalloc(c->ltab_sz);
	int err;

	if (!buf)
		return -ENOMEM;
	err = ubi_read(c->ubi, c->ltab_lnum, buf, c->ltab_offs, c->ltab_sz);
	if (!err)
		err = unpack_ltab(c, buf);
	vfree(buf);
	return err;
}
/**
 * read_lsave - read LPT's save table.
 * @c: UBIFS file-system description object
 *
 * Reads and unpacks the lsave node, then looks up each saved LEB so that its
 * pnode is loaded and categorized (the lookup is done for this side effect).
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int read_lsave(struct ubifs_info *c)
{
	int err, i;
	void *buf;

	buf = vmalloc(c->lsave_sz);
	if (!buf)
		return -ENOMEM;
	err = ubi_read(c->ubi, c->lsave_lnum, buf, c->lsave_offs, c->lsave_sz);
	if (err)
		goto out;
	err = unpack_lsave(c, buf);
	if (err)
		goto out;
	for (i = 0; i < c->lsave_cnt; i++) {
		int lnum = c->lsave[i];
		struct ubifs_lprops *lprops;

		/*
		 * Due to automatic resizing, the values in the lsave table
		 * could be beyond the volume size - just ignore them.
		 */
		if (lnum >= c->leb_cnt)
			continue;
		/* Lookup loads the pnode holding this LEB's properties */
		lprops = ubifs_lpt_lookup(c, lnum);
		if (IS_ERR(lprops)) {
			err = PTR_ERR(lprops);
			goto out;
		}
	}
out:
	vfree(buf);
	return err;
}
/**
 * ubifs_get_nnode - get a nnode.
 * @c: UBIFS file-system description object
 * @parent: parent nnode (or NULL for the root)
 * @iip: index in parent
 *
 * Returns the cached child nnode if it is already in memory, otherwise reads
 * it from flash first.
 *
 * This function returns a pointer to the nnode on success or a negative error
 * code on failure.
 */
struct ubifs_nnode *ubifs_get_nnode(struct ubifs_info *c,
				    struct ubifs_nnode *parent, int iip)
{
	struct ubifs_nbranch *branch = &parent->nbranch[iip];
	int err;

	if (!branch->nnode) {
		/* Not cached - read it, which also links it to the branch */
		err = ubifs_read_nnode(c, parent, iip);
		if (err)
			return ERR_PTR(err);
	}
	return branch->nnode;
}
/**
 * ubifs_get_pnode - get a pnode.
 * @c: UBIFS file-system description object
 * @parent: parent nnode
 * @iip: index in parent
 *
 * Returns the cached pnode if it is already in memory, otherwise reads it
 * from flash and adds its LEB properties to the category lists and heaps.
 *
 * This function returns a pointer to the pnode on success or a negative error
 * code on failure.
 */
struct ubifs_pnode *ubifs_get_pnode(struct ubifs_info *c,
				    struct ubifs_nnode *parent, int iip)
{
	struct ubifs_nbranch *branch = &parent->nbranch[iip];
	int err;

	if (branch->pnode)
		return branch->pnode;
	/* Not cached - read it, which also links it to the branch */
	err = read_pnode(c, parent, iip);
	if (err)
		return ERR_PTR(err);
	/* Freshly-loaded lprops must be categorized */
	update_cats(c, branch->pnode);
	return branch->pnode;
}
/**
 * ubifs_lpt_lookup - lookup LEB properties in the LPT.
 * @c: UBIFS file-system description object
 * @lnum: LEB number to lookup
 *
 * Walks the LPT from the root, selecting a branch at each level from the
 * fan-out bits of the main-area index of @lnum, loading nodes from flash as
 * needed.
 *
 * This function returns a pointer to the LEB properties on success or a
 * negative error code on failure.
 */
struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum)
{
	int err, i, h, iip, shft;
	struct ubifs_nnode *nnode;
	struct ubifs_pnode *pnode;

	/* Lazily load the root nnode on first use */
	if (!c->nroot) {
		err = ubifs_read_nnode(c, NULL, 0);
		if (err)
			return ERR_PTR(err);
	}
	nnode = c->nroot;
	/* Index of the LEB within the main area selects the path */
	i = lnum - c->main_first;
	shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT;
	/* Descend through the nnode levels */
	for (h = 1; h < c->lpt_hght; h++) {
		iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
		shft -= UBIFS_LPT_FANOUT_SHIFT;
		nnode = ubifs_get_nnode(c, nnode, iip);
		if (IS_ERR(nnode))
			return ERR_CAST(nnode);
	}
	/* Last level: the branch leads to a pnode */
	iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
	shft -= UBIFS_LPT_FANOUT_SHIFT;
	pnode = ubifs_get_pnode(c, nnode, iip);
	if (IS_ERR(pnode))
		return ERR_CAST(pnode);
	/* The lowest fan-out bits select the slot within the pnode */
	iip = (i & (UBIFS_LPT_FANOUT - 1));
	dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum,
	       pnode->lprops[iip].free, pnode->lprops[iip].dirty,
	       pnode->lprops[iip].flags);
	return &pnode->lprops[iip];
}
/**
 * dirty_cow_nnode - ensure a nnode is not being committed.
 * @c: UBIFS file-system description object
 * @nnode: nnode to check
 *
 * If @nnode is being committed it must not be modified in place, so a copy
 * is made, marked dirty, and linked into the tree; the original is marked
 * obsolete.
 *
 * Returns dirtied nnode on success or negative error code on failure.
 */
static struct ubifs_nnode *dirty_cow_nnode(struct ubifs_info *c,
					   struct ubifs_nnode *nnode)
{
	struct ubifs_nnode *n;
	int i;

	if (!test_bit(COW_CNODE, &nnode->flags)) {
		/* nnode is not being committed */
		if (!test_and_set_bit(DIRTY_CNODE, &nnode->flags)) {
			/* First time dirtied: account for it */
			c->dirty_nn_cnt += 1;
			ubifs_add_nnode_dirt(c, nnode);
		}
		return nnode;
	}

	/* nnode is being committed, so copy it */
	n = kmalloc(sizeof(struct ubifs_nnode), GFP_NOFS);
	if (unlikely(!n))
		return ERR_PTR(-ENOMEM);

	memcpy(n, nnode, sizeof(struct ubifs_nnode));
	n->cnext = NULL;
	__set_bit(DIRTY_CNODE, &n->flags);
	__clear_bit(COW_CNODE, &n->flags);

	/* The children now have new parent */
	for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
		struct ubifs_nbranch *branch = &n->nbranch[i];

		if (branch->cnode)
			branch->cnode->parent = n;
	}

	/* The original becomes obsolete; the dirt belongs to the copy */
	ubifs_assert(!test_bit(OBSOLETE_CNODE, &nnode->flags));
	__set_bit(OBSOLETE_CNODE, &nnode->flags);

	c->dirty_nn_cnt += 1;
	ubifs_add_nnode_dirt(c, nnode);
	/* Re-link: the parent's branch (or the root) now points at the copy */
	if (nnode->parent)
		nnode->parent->nbranch[n->iip].nnode = n;
	else
		c->nroot = n;
	return n;
}
/**
 * dirty_cow_pnode - ensure a pnode is not being committed.
 * @c: UBIFS file-system description object
 * @pnode: pnode to check
 *
 * If @pnode is being committed it must not be modified in place, so a copy
 * is made, marked dirty, and linked into the tree; the original is marked
 * obsolete and category references are moved to the copy.
 *
 * Returns dirtied pnode on success or negative error code on failure.
 */
static struct ubifs_pnode *dirty_cow_pnode(struct ubifs_info *c,
					   struct ubifs_pnode *pnode)
{
	struct ubifs_pnode *p;

	if (!test_bit(COW_CNODE, &pnode->flags)) {
		/* pnode is not being committed */
		if (!test_and_set_bit(DIRTY_CNODE, &pnode->flags)) {
			/* First time dirtied: account for it */
			c->dirty_pn_cnt += 1;
			add_pnode_dirt(c, pnode);
		}
		return pnode;
	}

	/* pnode is being committed, so copy it */
	p = kmalloc(sizeof(struct ubifs_pnode), GFP_NOFS);
	if (unlikely(!p))
		return ERR_PTR(-ENOMEM);

	memcpy(p, pnode, sizeof(struct ubifs_pnode));
	p->cnext = NULL;
	__set_bit(DIRTY_CNODE, &p->flags);
	__clear_bit(COW_CNODE, &p->flags);
	/* Category lists/heaps must now reference the copy's lprops */
	replace_cats(c, pnode, p);

	/* The original becomes obsolete; the dirt belongs to the copy */
	ubifs_assert(!test_bit(OBSOLETE_CNODE, &pnode->flags));
	__set_bit(OBSOLETE_CNODE, &pnode->flags);

	c->dirty_pn_cnt += 1;
	add_pnode_dirt(c, pnode);
	/* Re-link the parent's branch to the copy */
	pnode->parent->nbranch[p->iip].pnode = p;
	return p;
}
/**
 * ubifs_lpt_lookup_dirty - lookup LEB properties in the LPT.
 * @c: UBIFS file-system description object
 * @lnum: LEB number to lookup
 *
 * Like ubifs_lpt_lookup(), but every node on the path is copy-on-write
 * dirtied so that the returned LEB properties may be modified by the caller.
 *
 * This function returns a pointer to the LEB properties on success or a
 * negative error code on failure.
 */
struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum)
{
	int err, i, h, iip, shft;
	struct ubifs_nnode *nnode;
	struct ubifs_pnode *pnode;

	/* Lazily load the root nnode on first use */
	if (!c->nroot) {
		err = ubifs_read_nnode(c, NULL, 0);
		if (err)
			return ERR_PTR(err);
	}
	nnode = c->nroot;
	nnode = dirty_cow_nnode(c, nnode);
	if (IS_ERR(nnode))
		return ERR_CAST(nnode);
	/* Index of the LEB within the main area selects the path */
	i = lnum - c->main_first;
	shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT;
	/* Descend through the nnode levels, dirtying each node on the way */
	for (h = 1; h < c->lpt_hght; h++) {
		iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
		shft -= UBIFS_LPT_FANOUT_SHIFT;
		nnode = ubifs_get_nnode(c, nnode, iip);
		if (IS_ERR(nnode))
			return ERR_CAST(nnode);
		nnode = dirty_cow_nnode(c, nnode);
		if (IS_ERR(nnode))
			return ERR_CAST(nnode);
	}
	/* Last level: the branch leads to a pnode */
	iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
	shft -= UBIFS_LPT_FANOUT_SHIFT;
	pnode = ubifs_get_pnode(c, nnode, iip);
	if (IS_ERR(pnode))
		return ERR_CAST(pnode);
	pnode = dirty_cow_pnode(c, pnode);
	if (IS_ERR(pnode))
		return ERR_CAST(pnode);
	/* The lowest fan-out bits select the slot within the pnode */
	iip = (i & (UBIFS_LPT_FANOUT - 1));
	dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum,
	       pnode->lprops[iip].free, pnode->lprops[iip].dirty,
	       pnode->lprops[iip].flags);
	ubifs_assert(test_bit(DIRTY_CNODE, &pnode->flags));
	return &pnode->lprops[iip];
}
/**
 * lpt_init_rd - initialize the LPT for reading.
 * @c: UBIFS file-system description object
 *
 * Allocates the LPT LEB-properties table, the node scratch buffer and the
 * category heaps, then reads the ltab from the media.
 *
 * This function returns %0 on success and a negative error code on failure.
 * On failure, already-made allocations are left for the caller to release.
 */
static int lpt_init_rd(struct ubifs_info *c)
{
	int err, i, buf_size;

	/* One ltab entry per LPT LEB */
	c->ltab = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
	if (!c->ltab)
		return -ENOMEM;

	/* One scratch buffer large enough for either LPT node type */
	buf_size = max_t(int, c->nnode_sz, c->pnode_sz);
	c->lpt_nod_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!c->lpt_nod_buf)
		return -ENOMEM;

	for (i = 0; i < LPROPS_HEAP_CNT; i++) {
		c->lpt_heap[i].arr = kmalloc(sizeof(void *) * LPT_HEAP_SZ,
					     GFP_KERNEL);
		if (!c->lpt_heap[i].arr)
			return -ENOMEM;
		c->lpt_heap[i].cnt = 0;
		c->lpt_heap[i].max_cnt = LPT_HEAP_SZ;
	}

	c->dirty_idx.arr = kmalloc(sizeof(void *) * LPT_HEAP_SZ, GFP_KERNEL);
	if (!c->dirty_idx.arr)
		return -ENOMEM;
	c->dirty_idx.cnt = 0;
	c->dirty_idx.max_cnt = LPT_HEAP_SZ;

	err = read_ltab(c);
	if (err)
		return err;

	dbg_lp("space_bits %d", c->space_bits);
	dbg_lp("lpt_lnum_bits %d", c->lpt_lnum_bits);
	dbg_lp("lpt_offs_bits %d", c->lpt_offs_bits);
	dbg_lp("lpt_spc_bits %d", c->lpt_spc_bits);
	dbg_lp("pcnt_bits %d", c->pcnt_bits);
	dbg_lp("lnum_bits %d", c->lnum_bits);
	dbg_lp("pnode_sz %d", c->pnode_sz);
	dbg_lp("nnode_sz %d", c->nnode_sz);
	dbg_lp("ltab_sz %d", c->ltab_sz);
	dbg_lp("lsave_sz %d", c->lsave_sz);
	dbg_lp("lsave_cnt %d", c->lsave_cnt);
	dbg_lp("lpt_hght %d", c->lpt_hght);
	dbg_lp("big_lpt %d", c->big_lpt);
	dbg_lp("LPT root is at %d:%d", c->lpt_lnum, c->lpt_offs);
	dbg_lp("LPT head is at %d:%d", c->nhead_lnum, c->nhead_offs);
	dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs);
	if (c->big_lpt)
		dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs);

	return 0;
}
/**
 * lpt_init_wr - initialize the LPT for writing.
 * @c: UBIFS file-system description object
 *
 * 'lpt_init_rd()' must have been called already.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int lpt_init_wr(struct ubifs_info *c)
{
	int err, lnum;

	/* Shadow copy of the ltab used during commit */
	c->ltab_cmt = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
	if (!c->ltab_cmt)
		return -ENOMEM;

	c->lpt_buf = vmalloc(c->leb_size);
	if (!c->lpt_buf)
		return -ENOMEM;

	if (c->big_lpt) {
		c->lsave = kmalloc(sizeof(int) * c->lsave_cnt, GFP_NOFS);
		if (!c->lsave)
			return -ENOMEM;
		err = read_lsave(c);
		if (err)
			return err;
	}

	/* Unmap completely free LPT LEBs so they can be reused */
	for (lnum = 0; lnum < c->lpt_lebs; lnum++) {
		if (c->ltab[lnum].free == c->leb_size) {
			err = ubifs_leb_unmap(c, lnum + c->lpt_first);
			if (err)
				return err;
		}
	}

	return 0;
}
/**
 * ubifs_lpt_init - initialize the LPT.
 * @c: UBIFS file-system description object
 * @rd: whether to initialize lpt for reading
 * @wr: whether to initialize lpt for writing
 *
 * For mounting 'rw', @rd and @wr are both true. For mounting 'ro', @rd is true
 * and @wr is false. For mounting from 'ro' to 'rw', @rd is false and @wr is
 * true.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr)
{
	int err;

	if (rd) {
		err = lpt_init_rd(c);
		if (err)
			goto out_err;
	}

	if (wr) {
		err = lpt_init_wr(c);
		if (err)
			goto out_err;
	}

	return 0;

out_err:
	/*
	 * lpt_init_rd()/lpt_init_wr() may fail after some allocations
	 * succeeded (ltab, node buffer, heaps, ...).  If the rd phase was
	 * done in this call, free everything it allocated, otherwise those
	 * buffers would leak because the caller has no handle on them.
	 */
	if (rd)
		ubifs_lpt_free(c, 0);
	return err;
}
/**
 * struct lpt_scan_node - somewhere to put nodes while we scan LPT.
 * @nnode: where to keep a nnode
 * @pnode: where to keep a pnode
 * @cnode: where to keep a cnode
 * @in_tree: is the node in the tree in memory
 * @ptr.nnode: pointer to the nnode (if it is an nnode) which may be here or in
 * the tree
 * @ptr.pnode: ditto for pnode
 * @ptr.cnode: ditto for cnode
 */
struct lpt_scan_node {
	/*
	 * Scratch storage for a node read from the media but not (yet)
	 * inserted into the in-memory LPT.
	 */
	union {
		struct ubifs_nnode nnode;
		struct ubifs_pnode pnode;
		struct ubifs_cnode cnode;
	};
	int in_tree;
	/*
	 * Points either at the scratch union above (when @in_tree is 0) or
	 * at the node in the in-memory tree (when @in_tree is 1).
	 */
	union {
		struct ubifs_nnode *nnode;
		struct ubifs_pnode *pnode;
		struct ubifs_cnode *cnode;
	} ptr;
};
/**
 * scan_get_nnode - for the scan, get a nnode from either the tree or flash.
 * @c: the UBIFS file-system description object
 * @path: where to put the nnode
 * @parent: parent of the nnode
 * @iip: index in parent of the nnode
 *
 * This function returns a pointer to the nnode on success or a negative error
 * code on failure.
 */
static struct ubifs_nnode *scan_get_nnode(struct ubifs_info *c,
					  struct lpt_scan_node *path,
					  struct ubifs_nnode *parent, int iip)
{
	struct ubifs_nbranch *branch = &parent->nbranch[iip];
	struct ubifs_nnode *nnode = branch->nnode;
	void *buf = c->lpt_nod_buf;
	int err;

	if (nnode) {
		/* Already in the in-memory tree - use it directly */
		path->in_tree = 1;
		path->ptr.nnode = nnode;
		return nnode;
	}

	/* Not in memory - construct it in the scan path's scratch slot */
	nnode = &path->nnode;
	path->in_tree = 0;
	path->ptr.nnode = nnode;
	memset(nnode, 0, sizeof(struct ubifs_nnode));
	if (branch->lnum == 0) {
		/*
		 * This nnode was not written which just means that the LEB
		 * properties in the subtree below it describe empty LEBs. We
		 * make the nnode as though we had read it, which in fact means
		 * doing almost nothing.
		 */
		if (c->big_lpt)
			nnode->num = calc_nnode_num_from_parent(c, parent, iip);
	} else {
		/* Read and unpack the on-media nnode */
		err = ubi_read(c->ubi, branch->lnum, buf, branch->offs,
			       c->nnode_sz);
		if (err)
			return ERR_PTR(err);
		err = ubifs_unpack_nnode(c, buf, nnode);
		if (err)
			return ERR_PTR(err);
	}
	err = validate_nnode(c, nnode, parent, iip);
	if (err)
		return ERR_PTR(err);
	if (!c->big_lpt)
		nnode->num = calc_nnode_num_from_parent(c, parent, iip);
	nnode->level = parent->level - 1;
	nnode->parent = parent;
	nnode->iip = iip;
	return nnode;
}
/**
 * scan_get_pnode - for the scan, get a pnode from either the tree or flash.
 * @c: the UBIFS file-system description object
 * @path: where to put the pnode
 * @parent: parent of the pnode
 * @iip: index in parent of the pnode
 *
 * This function returns a pointer to the pnode on success or a negative error
 * code on failure.
 */
static struct ubifs_pnode *scan_get_pnode(struct ubifs_info *c,
					  struct lpt_scan_node *path,
					  struct ubifs_nnode *parent, int iip)
{
	struct ubifs_nbranch *branch = &parent->nbranch[iip];
	struct ubifs_pnode *pnode = branch->pnode;
	void *buf = c->lpt_nod_buf;
	int err;

	if (pnode) {
		/* Already in the in-memory tree - use it directly */
		path->in_tree = 1;
		path->ptr.pnode = pnode;
		return pnode;
	}

	/* Not in memory - construct it in the scan path's scratch slot */
	pnode = &path->pnode;
	path->in_tree = 0;
	path->ptr.pnode = pnode;
	memset(pnode, 0, sizeof(struct ubifs_pnode));
	if (branch->lnum == 0) {
		/*
		 * This pnode was not written which just means that the LEB
		 * properties in it describe empty LEBs. We make the pnode as
		 * though we had read it.
		 */
		int i;

		if (c->big_lpt)
			pnode->num = calc_pnode_num_from_parent(c, parent, iip);
		for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
			struct ubifs_lprops * const lprops = &pnode->lprops[i];

			lprops->free = c->leb_size;
			lprops->flags = ubifs_categorize_lprops(c, lprops);
		}
	} else {
		/* Read and unpack the on-media pnode */
		ubifs_assert(branch->lnum >= c->lpt_first &&
			     branch->lnum <= c->lpt_last);
		ubifs_assert(branch->offs >= 0 && branch->offs < c->leb_size);
		err = ubi_read(c->ubi, branch->lnum, buf, branch->offs,
			       c->pnode_sz);
		if (err)
			return ERR_PTR(err);
		err = unpack_pnode(c, buf, pnode);
		if (err)
			return ERR_PTR(err);
	}
	err = validate_pnode(c, pnode, parent, iip);
	if (err)
		return ERR_PTR(err);
	if (!c->big_lpt)
		pnode->num = calc_pnode_num_from_parent(c, parent, iip);
	pnode->parent = parent;
	pnode->iip = iip;
	set_pnode_lnum(c, pnode);
	return pnode;
}
/**
 * ubifs_lpt_scan_nolock - scan the LPT.
 * @c: the UBIFS file-system description object
 * @start_lnum: LEB number from which to start scanning
 * @end_lnum: LEB number at which to stop scanning
 * @scan_cb: callback function called for each lprops
 * @data: data to be passed to the callback function
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_lpt_scan_nolock(struct ubifs_info *c, int start_lnum, int end_lnum,
			  ubifs_lpt_scan_callback scan_cb, void *data)
{
	int err = 0, i, h, iip, shft;
	struct ubifs_nnode *nnode;
	struct ubifs_pnode *pnode;
	struct lpt_scan_node *path;

	/* start_lnum of -1 means "begin just after end_lnum", with wrap */
	if (start_lnum == -1) {
		start_lnum = end_lnum + 1;
		if (start_lnum >= c->leb_cnt)
			start_lnum = c->main_first;
	}

	ubifs_assert(start_lnum >= c->main_first && start_lnum < c->leb_cnt);
	ubifs_assert(end_lnum >= c->main_first && end_lnum < c->leb_cnt);

	if (!c->nroot) {
		err = ubifs_read_nnode(c, NULL, 0);
		if (err)
			return err;
	}

	/*
	 * 'path' holds one scratch slot per tree level so nodes can be
	 * visited without being inserted into the in-memory tree.  Slot 0
	 * is the root; slot 'h' corresponds to level 'h' of the descent.
	 */
	path = kmalloc(sizeof(struct lpt_scan_node) * (c->lpt_hght + 1),
		       GFP_NOFS);
	if (!path)
		return -ENOMEM;

	path[0].ptr.nnode = c->nroot;
	path[0].in_tree = 1;
again:
	/* Descend to the pnode containing start_lnum */
	nnode = c->nroot;
	i = start_lnum - c->main_first;
	shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT;
	for (h = 1; h < c->lpt_hght; h++) {
		iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
		shft -= UBIFS_LPT_FANOUT_SHIFT;
		nnode = scan_get_nnode(c, path + h, nnode, iip);
		if (IS_ERR(nnode)) {
			err = PTR_ERR(nnode);
			goto out;
		}
	}
	iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
	shft -= UBIFS_LPT_FANOUT_SHIFT;
	pnode = scan_get_pnode(c, path + h, nnode, iip);
	if (IS_ERR(pnode)) {
		err = PTR_ERR(pnode);
		goto out;
	}
	iip = (i & (UBIFS_LPT_FANOUT - 1));

	/* Loop for each lprops */
	while (1) {
		struct ubifs_lprops *lprops = &pnode->lprops[iip];
		int ret, lnum = lprops->lnum;

		/* The callback decides whether to add/stop via flag bits */
		ret = scan_cb(c, lprops, path[h].in_tree, data);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret & LPT_SCAN_ADD) {
			/* Add all the nodes in path to the tree in memory */
			for (h = 1; h < c->lpt_hght; h++) {
				const size_t sz = sizeof(struct ubifs_nnode);
				struct ubifs_nnode *parent;

				if (path[h].in_tree)
					continue;
				nnode = kmalloc(sz, GFP_NOFS);
				if (!nnode) {
					err = -ENOMEM;
					goto out;
				}
				memcpy(nnode, &path[h].nnode, sz);
				parent = nnode->parent;
				parent->nbranch[nnode->iip].nnode = nnode;
				path[h].ptr.nnode = nnode;
				path[h].in_tree = 1;
				/* Child scratch node must point at new copy */
				path[h + 1].cnode.parent = nnode;
			}
			if (path[h].in_tree)
				ubifs_ensure_cat(c, lprops);
			else {
				const size_t sz = sizeof(struct ubifs_pnode);
				struct ubifs_nnode *parent;

				pnode = kmalloc(sz, GFP_NOFS);
				if (!pnode) {
					err = -ENOMEM;
					goto out;
				}
				memcpy(pnode, &path[h].pnode, sz);
				parent = pnode->parent;
				parent->nbranch[pnode->iip].pnode = pnode;
				path[h].ptr.pnode = pnode;
				path[h].in_tree = 1;
				update_cats(c, pnode);
				c->pnodes_have += 1;
			}
			err = dbg_check_lpt_nodes(c, (struct ubifs_cnode *)
						  c->nroot, 0, 0);
			if (err)
				goto out;
			err = dbg_check_cats(c);
			if (err)
				goto out;
		}
		if (ret & LPT_SCAN_STOP) {
			err = 0;
			break;
		}
		/* Get the next lprops */
		if (lnum == end_lnum) {
			/*
			 * We got to the end without finding what we were
			 * looking for
			 */
			err = -ENOSPC;
			goto out;
		}
		if (lnum + 1 >= c->leb_cnt) {
			/* Wrap-around to the beginning */
			start_lnum = c->main_first;
			goto again;
		}
		if (iip + 1 < UBIFS_LPT_FANOUT) {
			/* Next lprops is in the same pnode */
			iip += 1;
			continue;
		}
		/* We need to get the next pnode. Go up until we can go right */
		iip = pnode->iip;
		while (1) {
			h -= 1;
			ubifs_assert(h >= 0);
			nnode = path[h].ptr.nnode;
			if (iip + 1 < UBIFS_LPT_FANOUT)
				break;
			iip = nnode->iip;
		}
		/* Go right */
		iip += 1;
		/* Descend to the pnode */
		h += 1;
		for (; h < c->lpt_hght; h++) {
			nnode = scan_get_nnode(c, path + h, nnode, iip);
			if (IS_ERR(nnode)) {
				err = PTR_ERR(nnode);
				goto out;
			}
			/* After the first step, always take leftmost branch */
			iip = 0;
		}
		pnode = scan_get_pnode(c, path + h, nnode, iip);
		if (IS_ERR(pnode)) {
			err = PTR_ERR(pnode);
			goto out;
		}
		iip = 0;
	}
out:
	kfree(path);
	return err;
}
#ifdef CONFIG_UBIFS_FS_DEBUG
/**
 * dbg_chk_pnode - check a pnode.
 * @c: the UBIFS file-system description object
 * @pnode: pnode to check
 * @col: pnode column
 *
 * Checks the pnode number, and that every contained lprops has a consistent
 * LEB number, a category compatible with its flags, membership in the
 * matching category heap or list, and sane free/dirty accounting.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
			 int col)
{
	int i;

	if (pnode->num != col) {
		dbg_err("pnode num %d expected %d parent num %d iip %d",
			pnode->num, col, pnode->parent->num, pnode->iip);
		return -EINVAL;
	}
	for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
		struct ubifs_lprops *lp, *lprops = &pnode->lprops[i];
		int lnum = (pnode->num << UBIFS_LPT_FANOUT_SHIFT) + i +
			   c->main_first;
		int found, cat = lprops->flags & LPROPS_CAT_MASK;
		struct ubifs_lpt_heap *heap;
		struct list_head *list = NULL;

		/* The last pnode may extend past the end of the main area */
		if (lnum >= c->leb_cnt)
			continue;
		if (lprops->lnum != lnum) {
			dbg_err("bad LEB number %d expected %d",
				lprops->lnum, lnum);
			return -EINVAL;
		}
		/* Taken LEBs must be uncategorized; nothing more to check */
		if (lprops->flags & LPROPS_TAKEN) {
			if (cat != LPROPS_UNCAT) {
				dbg_err("LEB %d taken but not uncat %d",
					lprops->lnum, cat);
				return -EINVAL;
			}
			continue;
		}
		/* Category must agree with the LPROPS_INDEX flag */
		if (lprops->flags & LPROPS_INDEX) {
			switch (cat) {
			case LPROPS_UNCAT:
			case LPROPS_DIRTY_IDX:
			case LPROPS_FRDI_IDX:
				break;
			default:
				dbg_err("LEB %d index but cat %d",
					lprops->lnum, cat);
				return -EINVAL;
			}
		} else {
			switch (cat) {
			case LPROPS_UNCAT:
			case LPROPS_DIRTY:
			case LPROPS_FREE:
			case LPROPS_EMPTY:
			case LPROPS_FREEABLE:
				break;
			default:
				dbg_err("LEB %d not index but cat %d",
					lprops->lnum, cat);
				return -EINVAL;
			}
		}
		/* List-based categories; heap-based ones leave 'list' NULL */
		switch (cat) {
		case LPROPS_UNCAT:
			list = &c->uncat_list;
			break;
		case LPROPS_EMPTY:
			list = &c->empty_list;
			break;
		case LPROPS_FREEABLE:
			list = &c->freeable_list;
			break;
		case LPROPS_FRDI_IDX:
			list = &c->frdi_idx_list;
			break;
		}
		/* Verify the lprops is actually on its category heap/list */
		found = 0;
		switch (cat) {
		case LPROPS_DIRTY:
		case LPROPS_DIRTY_IDX:
		case LPROPS_FREE:
			heap = &c->lpt_heap[cat - 1];
			if (lprops->hpos < heap->cnt &&
			    heap->arr[lprops->hpos] == lprops)
				found = 1;
			break;
		case LPROPS_UNCAT:
		case LPROPS_EMPTY:
		case LPROPS_FREEABLE:
		case LPROPS_FRDI_IDX:
			list_for_each_entry(lp, list, list)
				if (lprops == lp) {
					found = 1;
					break;
				}
			break;
		}
		if (!found) {
			dbg_err("LEB %d cat %d not found in cat heap/list",
				lprops->lnum, cat);
			return -EINVAL;
		}
		switch (cat) {
		case LPROPS_EMPTY:
			if (lprops->free != c->leb_size) {
				dbg_err("LEB %d cat %d free %d dirty %d",
					lprops->lnum, cat, lprops->free,
					lprops->dirty);
				return -EINVAL;
			}
			/*
			 * Fall through: an empty LEB must also satisfy the
			 * free + dirty == leb_size invariant (i.e. dirty 0).
			 */
		case LPROPS_FREEABLE:
		case LPROPS_FRDI_IDX:
			if (lprops->free + lprops->dirty != c->leb_size) {
				dbg_err("LEB %d cat %d free %d dirty %d",
					lprops->lnum, cat, lprops->free,
					lprops->dirty);
				return -EINVAL;
			}
		}
	}
	return 0;
}
/**
 * dbg_check_lpt_nodes - check nnodes and pnodes.
 * @c: the UBIFS file-system description object
 * @cnode: next cnode (nnode or pnode) to check
 * @row: row of cnode (root is zero)
 * @col: column of cnode (leftmost is zero)
 *
 * Walks the in-memory LPT iteratively (depth-first, via parent pointers
 * rather than recursion), verifying node numbers of nnodes and checking
 * each pnode with dbg_chk_pnode().
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
			int row, int col)
{
	struct ubifs_nnode *nnode, *nn;
	struct ubifs_cnode *cn;
	int num, iip = 0, err;

	/* Only run when LEB properties checking is enabled */
	if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS))
		return 0;

	while (cnode) {
		ubifs_assert(row >= 0);
		nnode = cnode->parent;
		if (cnode->level) {
			/* cnode is a nnode */
			num = calc_nnode_num(row, col);
			if (cnode->num != num) {
				dbg_err("nnode num %d expected %d "
					"parent num %d iip %d", cnode->num, num,
					(nnode ? nnode->num : 0), cnode->iip);
				return -EINVAL;
			}
			nn = (struct ubifs_nnode *)cnode;
			/* Find the first present child at or after 'iip' */
			while (iip < UBIFS_LPT_FANOUT) {
				cn = nn->nbranch[iip].cnode;
				if (cn) {
					/* Go down */
					row += 1;
					col <<= UBIFS_LPT_FANOUT_SHIFT;
					col += iip;
					iip = 0;
					cnode = cn;
					break;
				}
				/* Go right */
				iip += 1;
			}
			/* Descended to a child - restart the loop there */
			if (iip < UBIFS_LPT_FANOUT)
				continue;
		} else {
			struct ubifs_pnode *pnode;

			/* cnode is a pnode */
			pnode = (struct ubifs_pnode *)cnode;
			err = dbg_chk_pnode(c, pnode, col);
			if (err)
				return err;
		}
		/* Go up and to the right */
		row -= 1;
		col >>= UBIFS_LPT_FANOUT_SHIFT;
		iip = cnode->iip + 1;
		cnode = (struct ubifs_cnode *)nnode;
	}
	return 0;
}
#endif /* CONFIG_UBIFS_FS_DEBUG */
/*
* node.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* DSP/BIOS Bridge Node Manager.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/list.h>
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/memdefs.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/sync.h>
#include <dspbridge/ntfy.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/cmm.h>
#include <dspbridge/cod.h>
#include <dspbridge/dev.h>
#include <dspbridge/msg.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/dbdcd.h>
#include <dspbridge/disp.h>
#include <dspbridge/rms_sh.h>
/* ----------------------------------- Link Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspioctl.h>
/* ----------------------------------- Others */
#include <dspbridge/uuidutil.h>
/* ----------------------------------- This */
#include <dspbridge/nodepriv.h>
#include <dspbridge/node.h>
#include <dspbridge/dmm.h>
/* Static/Dynamic Loader includes */
#include <dspbridge/dbll.h>
#include <dspbridge/nldr.h>
#include <dspbridge/drv.h>
#include <dspbridge/resourcecleanup.h>
#include <_tiomap.h>
#include <dspbridge/dspdeh.h>
/* Device-name prefixes for host and pipe connections */
#define HOSTPREFIX "/host"
#define PIPEPREFIX "/dbpipe"

/* Stream limits taken from the node's database properties */
#define MAX_INPUTS(h) \
	((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
#define MAX_OUTPUTS(h) \
	((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)

/* Accessors for a node's priority and state */
#define NODE_GET_PRIORITY(h) ((h)->prio)
#define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
#define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)

#define MAXPIPES 100		/* Max # of /pipe connections (CSL limit) */
#define MAXDEVSUFFIXLEN 2	/* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */

#define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
#define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)

#define MAXDEVNAMELEN 32	/* dsp_ndbprops.ac_name size */

/* Node phase identifiers (create/execute/delete function resolution) */
#define CREATEPHASE 1
#define EXECUTEPHASE 2
#define DELETEPHASE 3

/* Define default STRM parameters */
/*
 * TBD: Put in header file, make global DSP_STRMATTRS with defaults,
 * or make defaults configurable.
 */
#define DEFAULTBUFSIZE 32
#define DEFAULTNBUFS 2
#define DEFAULTSEGID 0
#define DEFAULTALIGNMENT 0
#define DEFAULTTIMEOUT 10000

/* Indices of the RMS functions kept in node_mgr.fxn_addrs[] */
#define RMSQUERYSERVER 0
#define RMSCONFIGURESERVER 1
#define RMSCREATENODE 2
#define RMSEXECUTENODE 3
#define RMSDELETENODE 4
#define RMSCHANGENODEPRIORITY 5
#define RMSREADMEMORY 6
#define RMSWRITEMEMORY 7
#define RMSCOPY 8

#define MAXTIMEOUT 2000

#define NUMRMSFXNS 9		/* Number of RMS function indices above */

#define PWR_TIMEOUT 500		/* default PWR timeout in msec */

#define STACKSEGLABEL "L1DSRAM_HEAP"	/* Label for DSP Stack Segment Addr */
/*
 *  ======== node_mgr ========
 *  Per-device manager of all nodes: tracks allocated nodes, pipe/channel
 *  resource bitmaps, RMS function addresses and loader state.
 */
struct node_mgr {
	struct dev_object *dev_obj;	/* Device object */
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;
	struct dcd_manager *dcd_mgr;	/* Proc/Node data manager */
	struct disp_object *disp_obj;	/* Node dispatcher */
	struct list_head node_list;	/* List of all allocated nodes */
	u32 num_nodes;		/* Number of nodes in node_list */
	u32 num_created;	/* Number of nodes *created* on DSP */
	DECLARE_BITMAP(pipe_map, MAXPIPES);	/* Pipe connection bitmap */
	DECLARE_BITMAP(pipe_done_map, MAXPIPES);	/* Pipes that are half free */
	/* Channel allocation bitmap */
	DECLARE_BITMAP(chnl_map, CHNL_MAXCHANNELS);
	/* DMA Channel allocation bitmap */
	DECLARE_BITMAP(dma_chnl_map, CHNL_MAXCHANNELS);
	/* Zero-Copy Channel alloc bitmap */
	DECLARE_BITMAP(zc_chnl_map, CHNL_MAXCHANNELS);
	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
	struct mutex node_mgr_lock;	/* For critical sections */
	u32 fxn_addrs[NUMRMSFXNS];	/* RMS function addresses */
	struct msg_mgr *msg_mgr_obj;	/* Message manager */
	/* Processor properties needed by Node Dispatcher */
	u32 num_chnls;		/* Total number of channels */
	u32 chnl_offset;	/* Offset of chnl ids rsvd for RMS */
	u32 chnl_buf_size;	/* Buffer size for data to RMS */
	int proc_family;	/* eg, 5000 */
	int proc_type;		/* eg, 5510 */
	u32 dsp_word_size;	/* Size of DSP word on host bytes */
	u32 dsp_data_mau_size;	/* Size of DSP data MAU */
	u32 dsp_mau_size;	/* Size of MAU */
	s32 min_pri;		/* Minimum runtime priority for node */
	s32 max_pri;		/* Maximum runtime priority for node */

	struct strm_mgr *strm_mgr_obj;	/* STRM manager */

	/* Loader properties */
	struct nldr_object *nldr_obj;	/* Handle to loader */
	struct node_ldr_fxns nldr_fxns;	/* Handle to loader functions */
};
/*
 *  ======== connecttype ========
 *  Kind of connection on one end of a node stream.
 */
enum connecttype {
	NOTCONNECTED = 0,	/* Stream end is not connected */
	NODECONNECT,		/* Connected to another node */
	HOSTCONNECT,		/* Connected to the GPP (host) */
	DEVICECONNECT,		/* Connected to a device node */
};
/*
 *  ======== stream_chnl ========
 *  One end of a node's stream connection.
 */
struct stream_chnl {
	enum connecttype type;	/* Type of stream connection */
	u32 dev_id;		/* pipe or channel id */
};
/*
 *  ======== node_object ========
 *  Represents a single node allocated on the DSP, together with the GPP-side
 *  state needed to create, connect, run and delete it.
 */
struct node_object {
	struct list_head list_elem;
	struct node_mgr *node_mgr;	/* The manager of this node */
	struct proc_object *processor;	/* Back pointer to processor */
	struct dsp_uuid node_uuid;	/* Node's ID */
	s32 prio;		/* Node's current priority */
	u32 timeout;		/* Timeout for blocking NODE calls */
	u32 heap_size;		/* Heap size */
	u32 dsp_heap_virt_addr;	/* Heap virtual address on the DSP side */
	u32 gpp_heap_virt_addr;	/* Heap virtual address on the GPP side */
	enum node_type ntype;	/* Type of node: message, task, etc */
	enum node_state node_state;	/* NODE_ALLOCATED, NODE_CREATED, ... */
	u32 num_inputs;		/* Current number of inputs */
	u32 num_outputs;	/* Current number of outputs */
	u32 max_input_index;	/* Current max input stream index */
	u32 max_output_index;	/* Current max output stream index */
	struct stream_chnl *inputs;	/* Node's input streams */
	struct stream_chnl *outputs;	/* Node's output streams */
	struct node_createargs create_args;	/* Args for node create func */
	nodeenv node_env;	/* Environment returned by RMS */
	struct dcd_genericobj dcd_props;	/* Node properties from DCD */
	struct dsp_cbdata *args;	/* Optional args to pass to node */
	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
	char *str_dev_name;	/* device name, if device node */
	struct sync_object *sync_done;	/* Synchronize node_terminate */
	s32 exit_status;	/* execute function return status */

	/* Information needed for node_get_attr() */
	void *device_owner;	/* If dev node, task that owns it */
	u32 num_gpp_inputs;	/* Current # of from GPP streams */
	u32 num_gpp_outputs;	/* Current # of to GPP streams */
	/* Current stream connections */
	struct dsp_streamconnect *stream_connect;

	/* Message queue */
	struct msg_queue *msg_queue_obj;

	/* These fields used for SM messaging */
	struct cmm_xlatorobject *xlator;	/* Node's SM addr translator */

	/* Handle to pass to dynamic loader */
	struct nldr_nodeobject *nldr_node_obj;
	bool loaded;		/* Code is (dynamically) loaded */
	bool phase_split;	/* Phases split in many libs or ovly */
};
/* Default buffer attributes */
static struct dsp_bufferattr node_dfltbufattrs = {
	.cb_struct = 0,
	.segment_id = 1,
	.buf_alignment = 0,
};

/* Forward declarations of file-local helper functions */
static void delete_node(struct node_object *hnode,
			struct process_context *pr_ctxt);
static void delete_node_mgr(struct node_mgr *hnode_mgr);
static void fill_stream_connect(struct node_object *node1,
				struct node_object *node2, u32 stream1,
				u32 stream2);
static void fill_stream_def(struct node_object *hnode,
			    struct node_strmdef *pstrm_def,
			    struct dsp_strmattr *pattrs);
static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
			   u32 phase);
static int get_node_props(struct dcd_manager *hdcd_mgr,
			  struct node_object *hnode,
			  const struct dsp_uuid *node_uuid,
			  struct dcd_genericobj *dcd_prop);
static int get_proc_props(struct node_mgr *hnode_mgr,
			  struct dev_object *hdev_obj);
static int get_rms_fxns(struct node_mgr *hnode_mgr);
static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
		u32 ul_num_bytes, u32 mem_space);
static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
		     u32 ul_num_bytes, u32 mem_space);

/* Dynamic loader functions. */
/* Positional initializers - order must match struct node_ldr_fxns */
static struct node_ldr_fxns nldr_fxns = {
	nldr_allocate,
	nldr_create,
	nldr_delete,
	nldr_get_fxn_addr,
	nldr_load,
	nldr_unload,
};
/*
 * node_get_state - return the current state of a node.
 * @hnode: opaque node handle (a struct node_object pointer).
 *
 * Returns the node's state, or -1 if @hnode is NULL.
 */
enum node_state node_get_state(void *hnode)
{
	struct node_object *node = hnode;

	if (!node)
		return -1;

	return node->node_state;
}
/*
* ======== node_allocate ========
* Purpose:
* Allocate GPP resources to manage a node on the DSP.
*/
int node_allocate(struct proc_object *hprocessor,
const struct dsp_uuid *node_uuid,
const struct dsp_cbdata *pargs,
const struct dsp_nodeattrin *attr_in,
struct node_res_object **noderes,
struct process_context *pr_ctxt)
{
struct node_mgr *hnode_mgr;
struct dev_object *hdev_obj;
struct node_object *pnode = NULL;
enum node_type node_type = NODE_TASK;
struct node_msgargs *pmsg_args;
struct node_taskargs *ptask_args;
u32 num_streams;
struct bridge_drv_interface *intf_fxns;
int status = 0;
struct cmm_object *hcmm_mgr = NULL; /* Shared memory manager hndl */
u32 proc_id;
u32 pul_value;
u32 dynext_base;
u32 off_set = 0;
u32 ul_stack_seg_val;
struct cfg_hostres *host_res;
struct bridge_dev_context *pbridge_context;
u32 mapped_addr = 0;
u32 map_attrs = 0x0;
struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
struct dmm_object *dmm_mgr;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif
void *node_res;
*noderes = NULL;
status = proc_get_processor_id(hprocessor, &proc_id);
if (proc_id != DSP_UNIT)
goto func_end;
status = proc_get_dev_object(hprocessor, &hdev_obj);
if (!status) {
status = dev_get_node_manager(hdev_obj, &hnode_mgr);
if (hnode_mgr == NULL)
status = -EPERM;
}
if (status)
goto func_end;
status = dev_get_bridge_context(hdev_obj, &pbridge_context);
if (!pbridge_context) {
status = -EFAULT;
goto func_end;
}
status = proc_get_state(hprocessor, &proc_state,
sizeof(struct dsp_processorstate));
if (status)
goto func_end;
/* If processor is in error state then don't attempt
to send the message */
if (proc_state.proc_state == PROC_ERROR) {
status = -EPERM;
goto func_end;
}
/* Assuming that 0 is not a valid function address */
if (hnode_mgr->fxn_addrs[0] == 0) {
/* No RMS on target - we currently can't handle this */
pr_err("%s: Failed, no RMS in base image\n", __func__);
status = -EPERM;
} else {
/* Validate attr_in fields, if non-NULL */
if (attr_in) {
/* Check if attr_in->prio is within range */
if (attr_in->prio < hnode_mgr->min_pri ||
attr_in->prio > hnode_mgr->max_pri)
status = -EDOM;
}
}
/* Allocate node object and fill in */
if (status)
goto func_end;
pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
if (pnode == NULL) {
status = -ENOMEM;
goto func_end;
}
pnode->node_mgr = hnode_mgr;
/* This critical section protects get_node_props */
mutex_lock(&hnode_mgr->node_mgr_lock);
/* Get dsp_ndbprops from node database */
status = get_node_props(hnode_mgr->dcd_mgr, pnode, node_uuid,
&(pnode->dcd_props));
if (status)
goto func_cont;
pnode->node_uuid = *node_uuid;
pnode->processor = hprocessor;
pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
pnode->timeout = pnode->dcd_props.obj_data.node_obj.ndb_props.timeout;
pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;
/* Currently only C64 DSP builds support Node Dynamic * heaps */
/* Allocate memory for node heap */
pnode->create_args.asa.task_arg_obj.heap_size = 0;
pnode->create_args.asa.task_arg_obj.dsp_heap_addr = 0;
pnode->create_args.asa.task_arg_obj.dsp_heap_res_addr = 0;
pnode->create_args.asa.task_arg_obj.gpp_heap_addr = 0;
if (!attr_in)
goto func_cont;
/* Check if we have a user allocated node heap */
if (!(attr_in->pgpp_virt_addr))
goto func_cont;
/* check for page aligned Heap size */
if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
__func__, attr_in->heap_size);
status = -EINVAL;
} else {
pnode->create_args.asa.task_arg_obj.heap_size =
attr_in->heap_size;
pnode->create_args.asa.task_arg_obj.gpp_heap_addr =
(u32) attr_in->pgpp_virt_addr;
}
if (status)
goto func_cont;
status = proc_reserve_memory(hprocessor,
pnode->create_args.asa.task_arg_obj.
heap_size + PAGE_SIZE,
(void **)&(pnode->create_args.asa.
task_arg_obj.dsp_heap_res_addr),
pr_ctxt);
if (status) {
pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
__func__, status);
goto func_cont;
}
#ifdef DSP_DMM_DEBUG
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (!dmm_mgr) {
status = DSP_EHANDLE;
goto func_cont;
}
dmm_mem_map_dump(dmm_mgr);
#endif
map_attrs |= DSP_MAPLITTLEENDIAN;
map_attrs |= DSP_MAPELEMSIZE32;
map_attrs |= DSP_MAPVIRTUALADDR;
status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
pnode->create_args.asa.task_arg_obj.heap_size,
(void *)pnode->create_args.asa.task_arg_obj.
dsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
pr_ctxt);
if (status)
pr_err("%s: Failed to map memory for Heap: 0x%x\n",
__func__, status);
else
pnode->create_args.asa.task_arg_obj.dsp_heap_addr =
(u32) mapped_addr;
func_cont:
mutex_unlock(&hnode_mgr->node_mgr_lock);
if (attr_in != NULL) {
/* Overrides of NBD properties */
pnode->timeout = attr_in->timeout;
pnode->prio = attr_in->prio;
}
/* Create object to manage notifications */
if (!status) {
pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
GFP_KERNEL);
if (pnode->ntfy_obj)
ntfy_init(pnode->ntfy_obj);
else
status = -ENOMEM;
}
if (!status) {
node_type = node_get_type(pnode);
/* Allocate dsp_streamconnect array for device, task, and
* dais socket nodes. */
if (node_type != NODE_MESSAGE) {
num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
pnode->stream_connect = kzalloc(num_streams *
sizeof(struct dsp_streamconnect),
GFP_KERNEL);
if (num_streams > 0 && pnode->stream_connect == NULL)
status = -ENOMEM;
}
if (!status && (node_type == NODE_TASK ||
node_type == NODE_DAISSOCKET)) {
/* Allocate arrays for maintainig stream connections */
pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
sizeof(struct stream_chnl), GFP_KERNEL);
pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
sizeof(struct stream_chnl), GFP_KERNEL);
ptask_args = &(pnode->create_args.asa.task_arg_obj);
ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
sizeof(struct node_strmdef),
GFP_KERNEL);
ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
sizeof(struct node_strmdef),
GFP_KERNEL);
if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
ptask_args->strm_in_def
== NULL))
|| (MAX_OUTPUTS(pnode) > 0
&& (pnode->outputs == NULL
|| ptask_args->strm_out_def == NULL)))
status = -ENOMEM;
}
}
if (!status && (node_type != NODE_DEVICE)) {
/* Create an event that will be posted when RMS_EXIT is
* received. */
pnode->sync_done = kzalloc(sizeof(struct sync_object),
GFP_KERNEL);
if (pnode->sync_done)
sync_init_event(pnode->sync_done);
else
status = -ENOMEM;
if (!status) {
/*Get the shared mem mgr for this nodes dev object */
status = cmm_get_handle(hprocessor, &hcmm_mgr);
if (!status) {
/* Allocate a SM addr translator for this node
* w/ deflt attr */
status = cmm_xlator_create(&pnode->xlator,
hcmm_mgr, NULL);
}
}
if (!status) {
/* Fill in message args */
if ((pargs != NULL) && (pargs->cb_data > 0)) {
pmsg_args =
&(pnode->create_args.asa.node_msg_args);
pmsg_args->pdata = kzalloc(pargs->cb_data,
GFP_KERNEL);
if (pmsg_args->pdata == NULL) {
status = -ENOMEM;
} else {
pmsg_args->arg_length = pargs->cb_data;
memcpy(pmsg_args->pdata,
pargs->node_data,
pargs->cb_data);
}
}
}
}
if (!status && node_type != NODE_DEVICE) {
/* Create a message queue for this node */
intf_fxns = hnode_mgr->intf_fxns;
status =
(*intf_fxns->msg_create_queue) (hnode_mgr->msg_mgr_obj,
&pnode->msg_queue_obj,
0,
pnode->create_args.asa.
node_msg_args.max_msgs,
pnode);
}
if (!status) {
/* Create object for dynamic loading */
status = hnode_mgr->nldr_fxns.allocate(hnode_mgr->nldr_obj,
(void *)pnode,
&pnode->dcd_props.
obj_data.node_obj,
&pnode->
nldr_node_obj,
&pnode->phase_split);
}
/* Compare value read from Node Properties and check if it is same as
* STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
* GPP Address, Read the value in that address and override the
* stack_seg value in task args */
if (!status &&
(char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
stack_seg_name != NULL) {
if (strcmp((char *)
pnode->dcd_props.obj_data.node_obj.ndb_props.
stack_seg_name, STACKSEGLABEL) == 0) {
void __iomem *stack_seg;
u32 stack_seg_pa;
status =
hnode_mgr->nldr_fxns.
get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
&dynext_base);
if (status)
pr_err("%s: Failed to get addr for DYNEXT_BEG"
" status = 0x%x\n", __func__, status);
status =
hnode_mgr->nldr_fxns.
get_fxn_addr(pnode->nldr_node_obj,
"L1DSRAM_HEAP", &pul_value);
if (status)
pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
" status = 0x%x\n", __func__, status);
host_res = pbridge_context->resources;
if (!host_res)
status = -EPERM;
if (status) {
pr_err("%s: Failed to get host resource, status"
" = 0x%x\n", __func__, status);
goto func_end;
}
off_set = pul_value - dynext_base;
stack_seg_pa = host_res->mem_phys[1] + off_set;
stack_seg = ioremap(stack_seg_pa, SZ_32);
if (!stack_seg) {
status = -ENOMEM;
goto func_end;
}
ul_stack_seg_val = readl(stack_seg);
iounmap(stack_seg);
dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
" 0x%x\n", __func__, ul_stack_seg_val,
host_res->mem_base[1] + off_set);
pnode->create_args.asa.task_arg_obj.stack_seg =
ul_stack_seg_val;
}
}
if (!status) {
/* Add the node to the node manager's list of allocated
* nodes. */
NODE_SET_STATE(pnode, NODE_ALLOCATED);
mutex_lock(&hnode_mgr->node_mgr_lock);
list_add_tail(&pnode->list_elem, &hnode_mgr->node_list);
++(hnode_mgr->num_nodes);
/* Exit critical section */
mutex_unlock(&hnode_mgr->node_mgr_lock);
/* Preset this to assume phases are split
* (for overlay and dll) */
pnode->phase_split = true;
/* Notify all clients registered for DSP_NODESTATECHANGE. */
proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
} else {
/* Cleanup */
if (pnode)
delete_node(pnode, pr_ctxt);
}
if (!status) {
status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
if (status) {
delete_node(pnode, pr_ctxt);
goto func_end;
}
*noderes = (struct node_res_object *)node_res;
drv_proc_node_update_heap_status(node_res, true);
drv_proc_node_update_status(node_res, true);
}
func_end:
dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
"node_res: %p status: 0x%x\n", __func__, hprocessor,
node_uuid, pargs, attr_in, noderes, status);
return status;
}
/*
* ======== node_alloc_msg_buf ========
* Purpose:
* Allocates buffer for zero copy messaging.
*/
DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
			 struct dsp_bufferattr *pattr,
			 u8 **pbuffer)
{
	struct node_object *pnode = (struct node_object *)hnode;
	int status = 0;
	bool va_flag = false;
	bool set_info;
	u32 proc_id;

	/* Zero-copy buffers can only be attached to real DSP nodes;
	 * device nodes have no shared-memory translator. */
	if (!pnode)
		status = -EFAULT;
	else if (node_get_type(pnode) == NODE_DEVICE)
		status = -EPERM;
	if (status)
		goto func_end;
	if (pattr == NULL)
		pattr = &node_dfltbufattrs;	/* set defaults */
	status = proc_get_processor_id(pnode->processor, &proc_id);
	/* proc_id is only valid when the query succeeded; previously it
	 * was inspected even on failure (uninitialized read). */
	if (status || proc_id != DSP_UNIT)
		goto func_end;
	/* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
	 * virt address, so set this info in this node's translator
	 * object for future ref. If MEM_GETVIRTUALSEGID then retrieve
	 * virtual address from node's translator. */
	if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
	    (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
		va_flag = true;
		set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
		    true : false;
		/* Clear mask bits */
		pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
		/* Set/get this node's translators virtual address base/size */
		status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
					 pattr->segment_id, set_info);
	}
	if (!status && (!va_flag)) {
		if (pattr->segment_id != 1) {
			/* Node supports single SM segment only. */
			status = -EBADR;
		}
		/* Arbitrary SM buffer alignment not supported for host side
		 * allocs, but guaranteed for the following alignment
		 * values. */
		switch (pattr->buf_alignment) {
		case 0:
		case 1:
		case 2:
		case 4:
			break;
		default:
			/* alignment value not supported */
			status = -EPERM;
			break;
		}
		if (!status) {
			/* allocate physical buffer from seg_id in node's
			 * translator */
			(void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
						   usize);
			if (*pbuffer == NULL) {
				pr_err("%s: error - Out of shared memory\n",
				       __func__);
				status = -ENOMEM;
			}
		}
	}
func_end:
	return status;
}
/*
* ======== node_change_priority ========
* Purpose:
* Change the priority of a node in the allocated state, or that is
* currently running or paused on the target.
*/
int node_change_priority(struct node_object *hnode, s32 prio)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	enum node_state state;
	int status = 0;
	u32 proc_id;

	if (!hnode || !hnode->node_mgr) {
		status = -EFAULT;
	} else {
		hnode_mgr = hnode->node_mgr;
		node_type = node_get_type(hnode);
		/* Only task and XDAIS socket nodes carry a priority */
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
		else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
			status = -EDOM;
	}
	if (status)
		goto func_end;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);
	state = node_get_state(hnode);
	if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
		/* Node not yet running: just record the new priority */
		NODE_SET_PRIORITY(hnode, prio);
	} else {
		if (state != NODE_RUNNING) {
			status = -EBADR;
			goto func_cont;
		}
		status = proc_get_processor_id(pnode->processor, &proc_id);
		/* proc_id is only valid when the query succeeded;
		 * previously it was compared even on failure
		 * (uninitialized read). */
		if (!status && proc_id == DSP_UNIT) {
			status =
			    disp_node_change_priority(hnode_mgr->disp_obj,
						      hnode,
						      hnode_mgr->fxn_addrs
						      [RMSCHANGENODEPRIORITY],
						      hnode->node_env, prio);
		}
		if (status >= 0)
			NODE_SET_PRIORITY(hnode, prio);
	}
func_cont:
	/* Leave critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
	return status;
}
/*
* ======== node_connect ========
* Purpose:
* Connect two nodes on the DSP, or a node on the DSP to the GPP.
*/
int node_connect(struct node_object *node1, u32 stream1,
		 struct node_object *node2,
		 u32 stream2, struct dsp_strmattr *pattrs,
		 struct dsp_cbdata *conn_param)
{
	struct node_mgr *hnode_mgr;
	char *pstr_dev_name = NULL;
	enum node_type node1_type = NODE_TASK;
	enum node_type node2_type = NODE_TASK;
	enum dsp_strmmode strm_mode;
	struct node_strmdef *pstrm_def;
	struct node_strmdef *input = NULL;
	struct node_strmdef *output = NULL;
	struct node_object *dev_node_obj;
	struct node_object *hnode;
	struct stream_chnl *pstream;
	u32 pipe_id;
	u32 chnl_id;
	s8 chnl_mode;
	u32 dw_length;
	int status = 0;
	if (!node1 || !node2)
		return -EFAULT;
	/* The two nodes must be on the same processor */
	if (node1 != (struct node_object *)DSP_HGPPNODE &&
	    node2 != (struct node_object *)DSP_HGPPNODE &&
	    node1->node_mgr != node2->node_mgr)
		return -EPERM;
	/* Cannot connect a node to itself */
	if (node1 == node2)
		return -EPERM;
	/* node_get_type() will return NODE_GPP if hnode = DSP_HGPPNODE. */
	node1_type = node_get_type(node1);
	node2_type = node_get_type(node2);
	/* Check stream indices ranges */
	if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
	     stream1 >= MAX_OUTPUTS(node1)) ||
	    (node2_type != NODE_GPP && node2_type != NODE_DEVICE &&
	     stream2 >= MAX_INPUTS(node2)))
		return -EINVAL;
	/*
	 * Only the following types of connections are allowed:
	 * task/dais socket < == > task/dais socket
	 * task/dais socket < == > device
	 * task/dais socket < == > GPP
	 *
	 * ie, no message nodes, and at least one task or dais
	 * socket node.
	 */
	if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
	    (node1_type != NODE_TASK &&
	     node1_type != NODE_DAISSOCKET &&
	     node2_type != NODE_TASK &&
	     node2_type != NODE_DAISSOCKET))
		return -EPERM;
	/*
	 * Check stream mode. Default is STRMMODE_PROCCOPY.
	 */
	if (pattrs && pattrs->strm_mode != STRMMODE_PROCCOPY)
		return -EPERM;	/* illegal stream mode */
	/* At least one endpoint is a real node (checked above), so one
	 * of the two managers is usable. */
	if (node1_type != NODE_GPP) {
		hnode_mgr = node1->node_mgr;
	} else {
		hnode_mgr = node2->node_mgr;
	}
	/* Enter critical section: pipe/channel bitmaps and the nodes'
	 * stream definitions are shared manager state. */
	mutex_lock(&hnode_mgr->node_mgr_lock);
	/* Nodes must be in the allocated state */
	if (node1_type != NODE_GPP &&
	    node_get_state(node1) != NODE_ALLOCATED) {
		status = -EBADR;
		goto out_unlock;
	}
	if (node2_type != NODE_GPP &&
	    node_get_state(node2) != NODE_ALLOCATED) {
		status = -EBADR;
		goto out_unlock;
	}
	/*
	 * Check that stream indices for task and dais socket nodes
	 * are not already in use. (Device nodes checked later)
	 */
	if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
		output = &(node1->create_args.asa.
			   task_arg_obj.strm_out_def[stream1]);
		if (output->sz_device) {
			status = -EISCONN;
			goto out_unlock;
		}
	}
	if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
		input = &(node2->create_args.asa.
			  task_arg_obj.strm_in_def[stream2]);
		if (input->sz_device) {
			status = -EISCONN;
			goto out_unlock;
		}
	}
	/* Connecting two task nodes? */
	if ((node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) &&
	    (node2_type == NODE_TASK ||
	     node2_type == NODE_DAISSOCKET)) {
		/* Find available pipe */
		pipe_id = find_first_zero_bit(hnode_mgr->pipe_map, MAXPIPES);
		if (pipe_id == MAXPIPES) {
			status = -ECONNREFUSED;
			goto out_unlock;
		}
		set_bit(pipe_id, hnode_mgr->pipe_map);
		node1->outputs[stream1].type = NODECONNECT;
		node2->inputs[stream2].type = NODECONNECT;
		node1->outputs[stream1].dev_id = pipe_id;
		node2->inputs[stream2].dev_id = pipe_id;
		output->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
		input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
		if (!output->sz_device || !input->sz_device) {
			/* Undo the connection: release names and pipe bit */
			kfree(output->sz_device);
			kfree(input->sz_device);
			clear_bit(pipe_id, hnode_mgr->pipe_map);
			status = -ENOMEM;
			goto out_unlock;
		}
		/* Copy "/dbpipe<pipId>" name to device names */
		sprintf(output->sz_device, "%s%d", PIPEPREFIX, pipe_id);
		strcpy(input->sz_device, output->sz_device);
	}
	/* Connecting task node to host? */
	if (node1_type == NODE_GPP || node2_type == NODE_GPP) {
		pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
		if (!pstr_dev_name) {
			status = -ENOMEM;
			goto out_unlock;
		}
		chnl_mode = (node1_type == NODE_GPP) ?
			CHNL_MODETODSP : CHNL_MODEFROMDSP;
		/*
		 * Reserve a channel id. We need to put the name "/host<id>"
		 * in the node's create_args, but the host
		 * side channel will not be opened until DSPStream_Open is
		 * called for this node.
		 */
		strm_mode = pattrs ? pattrs->strm_mode : STRMMODE_PROCCOPY;
		switch (strm_mode) {
		case STRMMODE_RDMA:
			chnl_id = find_first_zero_bit(hnode_mgr->dma_chnl_map,
						      CHNL_MAXCHANNELS);
			if (chnl_id < CHNL_MAXCHANNELS) {
				set_bit(chnl_id, hnode_mgr->dma_chnl_map);
				/* dma chans are 2nd transport chnl set
				 * ids(e.g. 16-31) */
				chnl_id = chnl_id + hnode_mgr->num_chnls;
			}
			break;
		case STRMMODE_ZEROCOPY:
			chnl_id = find_first_zero_bit(hnode_mgr->zc_chnl_map,
						      CHNL_MAXCHANNELS);
			if (chnl_id < CHNL_MAXCHANNELS) {
				set_bit(chnl_id, hnode_mgr->zc_chnl_map);
				/* zero-copy chans are 3rd transport set
				 * (e.g. 32-47) */
				chnl_id = chnl_id +
				    (2 * hnode_mgr->num_chnls);
			}
			break;
		case STRMMODE_PROCCOPY:
			chnl_id = find_first_zero_bit(hnode_mgr->chnl_map,
						      CHNL_MAXCHANNELS);
			if (chnl_id < CHNL_MAXCHANNELS)
				set_bit(chnl_id, hnode_mgr->chnl_map);
			break;
		default:
			status = -EINVAL;
			goto out_unlock;
		}
		/* The per-mode offsets above are only applied when a free
		 * bit was found, so equality still means "map full". */
		if (chnl_id == CHNL_MAXCHANNELS) {
			status = -ECONNREFUSED;
			goto out_unlock;
		}
		/* Attach the reserved name to the task-side endpoint;
		 * ownership of pstr_dev_name transfers to the strm def. */
		if (node1 == (struct node_object *)DSP_HGPPNODE) {
			node2->inputs[stream2].type = HOSTCONNECT;
			node2->inputs[stream2].dev_id = chnl_id;
			input->sz_device = pstr_dev_name;
		} else {
			node1->outputs[stream1].type = HOSTCONNECT;
			node1->outputs[stream1].dev_id = chnl_id;
			output->sz_device = pstr_dev_name;
		}
		sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
	}
	/* Connecting task node to device node? */
	if ((node1_type == NODE_DEVICE) || (node2_type == NODE_DEVICE)) {
		if (node2_type == NODE_DEVICE) {
			/* node1 == > device */
			dev_node_obj = node2;
			hnode = node1;
			pstream = &(node1->outputs[stream1]);
			pstrm_def = output;
		} else {
			/* device == > node2 */
			dev_node_obj = node1;
			hnode = node2;
			pstream = &(node2->inputs[stream2]);
			pstrm_def = input;
		}
		/* Set up create args */
		pstream->type = DEVICECONNECT;
		dw_length = strlen(dev_node_obj->str_dev_name);
		if (conn_param)
			pstrm_def->sz_device = kzalloc(dw_length + 1 +
						       conn_param->cb_data,
						       GFP_KERNEL);
		else
			pstrm_def->sz_device = kzalloc(dw_length + 1,
						       GFP_KERNEL);
		if (!pstrm_def->sz_device) {
			status = -ENOMEM;
			goto out_unlock;
		}
		/* Copy device name */
		strncpy(pstrm_def->sz_device,
			dev_node_obj->str_dev_name, dw_length);
		if (conn_param)
			strncat(pstrm_def->sz_device,
				(char *)conn_param->node_data,
				(u32) conn_param->cb_data);
		dev_node_obj->device_owner = hnode;
	}
	/* Fill in create args */
	if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
		node1->create_args.asa.task_arg_obj.num_outputs++;
		fill_stream_def(node1, output, pattrs);
	}
	if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
		node2->create_args.asa.task_arg_obj.num_inputs++;
		fill_stream_def(node2, input, pattrs);
	}
	/* Update node1 and node2 stream_connect */
	if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
		node1->num_outputs++;
		if (stream1 > node1->max_output_index)
			node1->max_output_index = stream1;
	}
	if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
		node2->num_inputs++;
		if (stream2 > node2->max_input_index)
			node2->max_input_index = stream2;
	}
	fill_stream_connect(node1, node2, stream1, stream2);
	/* end of sync_enter_cs */
	/* Exit critical section */
out_unlock:
	/* On failure, pstr_dev_name has not been handed to a strm def
	 * (transfer only happens on the success path above). */
	if (status && pstr_dev_name)
		kfree(pstr_dev_name);
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d"
		"pattrs: %p status: 0x%x\n", __func__, node1,
		stream1, node2, stream2, pattrs, status);
	return status;
}
/*
* ======== node_create ========
* Purpose:
* Create a node on the DSP by remotely calling the node's create function.
*/
int node_create(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	struct bridge_drv_interface *intf_fxns;
	u32 ul_create_fxn;
	enum node_type node_type;
	int status = 0;
	int status1 = 0;
	struct dsp_cbdata cb_data;
	u32 proc_id = 255;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif
	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to create
	   new node */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* create struct dsp_cbdata struct for PWR calls */
	cb_data.cb_data = PWR_TIMEOUT;
	node_type = node_get_type(hnode);
	hnode_mgr = hnode->node_mgr;
	intf_fxns = hnode_mgr->intf_fxns;
	/* Get access to node dispatcher */
	mutex_lock(&hnode_mgr->node_mgr_lock);
	/* Check node state: only an allocated node may be created */
	if (node_get_state(hnode) != NODE_ALLOCATED)
		status = -EBADR;
	if (!status)
		status = proc_get_processor_id(pnode->processor, &proc_id);
	if (status)
		goto func_cont2;
	if (proc_id != DSP_UNIT)
		goto func_cont2;
	/* Make sure streams are properly connected: every stream index
	 * up to the max used index must have been connected */
	if ((hnode->num_inputs && hnode->max_input_index >
	     hnode->num_inputs - 1) ||
	    (hnode->num_outputs && hnode->max_output_index >
	     hnode->num_outputs - 1))
		status = -ENOTCONN;
	if (!status) {
		/* If node's create function is not loaded, load it */
		/* Boost the OPP level to max level that DSP can be requested */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
#endif
		status = hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
						   NLDR_CREATE);
		/* Get address of node's create function */
		if (!status) {
			hnode->loaded = true;
			if (node_type != NODE_DEVICE) {
				status = get_fxn_address(hnode, &ul_create_fxn,
							 CREATEPHASE);
			}
		} else {
			pr_err("%s: failed to load create code: 0x%x\n",
			       __func__, status);
		}
		/* Request the lowest OPP level */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif
		/* Get address of iAlg functions, if socket node */
		if (!status) {
			if (node_type == NODE_DAISSOCKET) {
				status = hnode_mgr->nldr_fxns.get_fxn_addr
				    (hnode->nldr_node_obj,
				     hnode->dcd_props.obj_data.node_obj.
				     str_i_alg_name,
				     &hnode->create_args.asa.
				     task_arg_obj.dais_arg);
			}
		}
	}
	if (!status) {
		if (node_type != NODE_DEVICE) {
			/* Remotely invoke the node's create phase on the DSP */
			status = disp_node_create(hnode_mgr->disp_obj, hnode,
						  hnode_mgr->fxn_addrs
						  [RMSCREATENODE],
						  ul_create_fxn,
						  &(hnode->create_args),
						  &(hnode->node_env));
			if (status >= 0) {
				/* Set the message queue id to the node env
				 * pointer */
				intf_fxns = hnode_mgr->intf_fxns;
				(*intf_fxns->msg_set_queue_id) (hnode->
							msg_queue_obj,
							hnode->node_env);
			}
		}
	}
	/* Phase II/Overlays: Create, execute, delete phases possibly in
	 * different files/sections. */
	if (hnode->loaded && hnode->phase_split) {
		/* If create code was dynamically loaded, we can now unload
		 * it. */
		status1 = hnode_mgr->nldr_fxns.unload(hnode->nldr_node_obj,
						      NLDR_CREATE);
		hnode->loaded = false;
	}
	if (status1)
		pr_err("%s: Failed to unload create code: 0x%x\n",
		       __func__, status1);
func_cont2:
	/* Update node state and node manager state */
	if (status >= 0) {
		NODE_SET_STATE(hnode, NODE_CREATED);
		hnode_mgr->num_created++;
		goto func_cont;
	}
	if (status != -EBADR) {
		/* Put back in NODE_ALLOCATED state if error occurred */
		NODE_SET_STATE(hnode, NODE_ALLOCATED);
	}
func_cont:
	/* Free access to node dispatcher */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
	if (status >= 0) {
		proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}
	dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
		hnode, status);
	return status;
}
/*
* ======== node_create_mgr ========
* Purpose:
* Create a NODE Manager object.
*/
int node_create_mgr(struct node_mgr **node_man,
struct dev_object *hdev_obj)
{
u32 i;
struct node_mgr *node_mgr_obj = NULL;
struct disp_attr disp_attr_obj;
char *sz_zl_file = "";
struct nldr_attrs nldr_attrs_obj;
int status = 0;
u8 dev_type;
*node_man = NULL;
/* Allocate Node manager object */
node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
if (!node_mgr_obj)
return -ENOMEM;
node_mgr_obj->dev_obj = hdev_obj;
node_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
GFP_KERNEL);
if (!node_mgr_obj->ntfy_obj) {
status = -ENOMEM;
goto out_err;
}
ntfy_init(node_mgr_obj->ntfy_obj);
INIT_LIST_HEAD(&node_mgr_obj->node_list);
dev_get_dev_type(hdev_obj, &dev_type);
status = dcd_create_manager(sz_zl_file, &node_mgr_obj->dcd_mgr);
if (status)
goto out_err;
status = get_proc_props(node_mgr_obj, hdev_obj);
if (status)
goto out_err;
/* Create NODE Dispatcher */
disp_attr_obj.chnl_offset = node_mgr_obj->chnl_offset;
disp_attr_obj.chnl_buf_size = node_mgr_obj->chnl_buf_size;
disp_attr_obj.proc_family = node_mgr_obj->proc_family;
disp_attr_obj.proc_type = node_mgr_obj->proc_type;
status = disp_create(&node_mgr_obj->disp_obj, hdev_obj, &disp_attr_obj);
if (status)
goto out_err;
/* Create a STRM Manager */
status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
if (status)
goto out_err;
dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
/* Get msg_ctrl queue manager */
dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
mutex_init(&node_mgr_obj->node_mgr_lock);
/* Block out reserved channels */
for (i = 0; i < node_mgr_obj->chnl_offset; i++)
set_bit(i, node_mgr_obj->chnl_map);
/* Block out channels reserved for RMS */
set_bit(node_mgr_obj->chnl_offset, node_mgr_obj->chnl_map);
set_bit(node_mgr_obj->chnl_offset + 1, node_mgr_obj->chnl_map);
/* NO RM Server on the IVA */
if (dev_type != IVA_UNIT) {
/* Get addresses of any RMS functions loaded */
status = get_rms_fxns(node_mgr_obj);
if (status)
goto out_err;
}
/* Get loader functions and create loader */
node_mgr_obj->nldr_fxns = nldr_fxns; /* Dyn loader funcs */
nldr_attrs_obj.ovly = ovly;
nldr_attrs_obj.write = mem_write;
nldr_attrs_obj.dsp_word_size = node_mgr_obj->dsp_word_size;
nldr_attrs_obj.dsp_mau_size = node_mgr_obj->dsp_mau_size;
status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj,
hdev_obj,
&nldr_attrs_obj);
if (status)
goto out_err;
*node_man = node_mgr_obj;
return status;
out_err:
delete_node_mgr(node_mgr_obj);
return status;
}
/*
* ======== node_delete ========
* Purpose:
* Delete a node on the DSP by remotely calling the node's delete function.
* Loads the node's delete function if necessary. Free GPP side resources
* after node's delete function returns.
*/
int node_delete(struct node_res_object *noderes,
		struct process_context *pr_ctxt)
{
	struct node_object *pnode = noderes->node;
	struct node_mgr *hnode_mgr;
	struct proc_object *hprocessor;
	struct disp_object *disp_obj;
	u32 ul_delete_fxn;
	enum node_type node_type;
	enum node_state state;
	int status = 0;
	int status1 = 0;
	struct dsp_cbdata cb_data;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;
	void *node_res = noderes;
	struct dsp_processorstate proc_state;
	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}
	/* create struct dsp_cbdata struct for PWR call */
	cb_data.cb_data = PWR_TIMEOUT;
	hnode_mgr = pnode->node_mgr;
	hprocessor = pnode->processor;
	disp_obj = hnode_mgr->disp_obj;
	node_type = node_get_type(pnode);
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);
	state = node_get_state(pnode);
	/* Execute delete phase code for non-device node in all cases
	 * except when the node was only allocated. Delete phase must be
	 * executed even if create phase was executed, but failed.
	 * If the node environment pointer is non-NULL, the delete phase
	 * code must be executed. */
	if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
	    node_type != NODE_DEVICE) {
		status = proc_get_processor_id(pnode->processor, &proc_id);
		if (status)
			goto func_cont1;
		if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
			/* If node has terminated, execute phase code will
			 * have already been unloaded in node_on_exit(). If the
			 * node is PAUSED, the execute phase is loaded, and it
			 * is now ok to unload it. If the node is running, we
			 * will unload the execute phase only after deleting
			 * the node. */
			if (state == NODE_PAUSED && pnode->loaded &&
			    pnode->phase_split) {
				/* Ok to unload execute code as long as node
				 * is not * running */
				status1 =
				    hnode_mgr->nldr_fxns.
				    unload(pnode->nldr_node_obj,
					   NLDR_EXECUTE);
				pnode->loaded = false;
				NODE_SET_STATE(pnode, NODE_DONE);
			}
			/* Load delete phase code if not loaded or if haven't
			 * * unloaded EXECUTE phase */
			if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
			    pnode->phase_split) {
				status =
				    hnode_mgr->nldr_fxns.
				    load(pnode->nldr_node_obj, NLDR_DELETE);
				if (!status)
					pnode->loaded = true;
				else
					pr_err("%s: fail - load delete code:"
					       " 0x%x\n", __func__, status);
			}
		}
func_cont1:
		if (!status) {
			/* Unblock a thread trying to terminate the node */
			(void)sync_set_event(pnode->sync_done);
			if (proc_id == DSP_UNIT) {
				/* ul_delete_fxn = address of node's delete
				 * function */
				status = get_fxn_address(pnode, &ul_delete_fxn,
							 DELETEPHASE);
			} else if (proc_id == IVA_UNIT)
				ul_delete_fxn = (u32) pnode->node_env;
			if (!status) {
				status = proc_get_state(hprocessor,
						&proc_state,
						sizeof(struct
						       dsp_processorstate));
				if (proc_state.proc_state != PROC_ERROR) {
					/* Remotely run the node's delete
					 * phase on the DSP */
					status =
					    disp_node_delete(disp_obj, pnode,
							     hnode_mgr->
							     fxn_addrs
							     [RMSDELETENODE],
							     ul_delete_fxn,
							     pnode->node_env);
				} else
					NODE_SET_STATE(pnode, NODE_DONE);
				/* Unload execute, if not unloaded, and delete
				 * function */
				if (state == NODE_RUNNING &&
				    pnode->phase_split) {
					status1 =
					    hnode_mgr->nldr_fxns.
					    unload(pnode->nldr_node_obj,
						   NLDR_EXECUTE);
				}
				if (status1)
					pr_err("%s: fail - unload execute code:"
					       " 0x%x\n", __func__, status1);
				status1 =
				    hnode_mgr->nldr_fxns.unload(pnode->
								nldr_node_obj,
								NLDR_DELETE);
				pnode->loaded = false;
				if (status1)
					pr_err("%s: fail - unload delete code: "
					       "0x%x\n", __func__, status1);
			}
		}
	}
	/* Free host side resources even if a failure occurred */
	/* Remove node from hnode_mgr->node_list */
	list_del(&pnode->list_elem);
	hnode_mgr->num_nodes--;
	/* Decrement count of nodes created on DSP */
	if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
					  (pnode->node_env != (u32) NULL)))
		hnode_mgr->num_created--;
	/* Free host-side resources allocated by node_create()
	 * delete_node() fails if SM buffers not freed by client! */
	drv_proc_node_update_status(node_res, false);
	delete_node(pnode, pr_ctxt);
	/*
	 * Release all Node resources and its context
	 */
	idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
	kfree(node_res);
	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
func_end:
	dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
	return status;
}
/*
* ======== node_delete_mgr ========
* Purpose:
* Delete the NODE Manager.
*/
int node_delete_mgr(struct node_mgr *hnode_mgr)
{
if (!hnode_mgr)
return -EFAULT;
delete_node_mgr(hnode_mgr);
return 0;
}
/*
* ======== node_enum_nodes ========
* Purpose:
* Enumerate currently allocated nodes.
*/
int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
		    u32 node_tab_size, u32 *pu_num_nodes,
		    u32 *pu_allocated)
{
	struct node_object *node_obj;
	u32 slot = 0;
	int status = 0;

	if (!hnode_mgr)
		return -EFAULT;

	/* Hold the manager lock while walking the node list */
	mutex_lock(&hnode_mgr->node_mgr_lock);
	if (hnode_mgr->num_nodes > node_tab_size) {
		/* Caller's table is too small: report required size only */
		*pu_allocated = hnode_mgr->num_nodes;
		*pu_num_nodes = 0;
		status = -EINVAL;
	} else {
		list_for_each_entry(node_obj, &hnode_mgr->node_list,
				    list_elem)
			node_tab[slot++] = node_obj;
		*pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
	}
	mutex_unlock(&hnode_mgr->node_mgr_lock);

	return status;
}
/*
* ======== node_free_msg_buf ========
* Purpose:
* Frees the message buffer.
*/
int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
		      struct dsp_bufferattr *pattr)
{
	struct node_object *pnode = (struct node_object *)hnode;
	int status = 0;
	u32 proc_id;

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	status = proc_get_processor_id(pnode->processor, &proc_id);
	/* proc_id is only valid when the query succeeded; previously it
	 * was inspected even on failure (uninitialized read). */
	if (status)
		goto func_end;
	if (proc_id == DSP_UNIT) {
		if (pattr == NULL) {
			/* set defaults */
			pattr = &node_dfltbufattrs;
		}
		/* Node supports single SM segment only; previously this
		 * -EBADR was silently overwritten by the free call below. */
		if (pattr->segment_id != 1) {
			status = -EBADR;
			goto func_end;
		}
		/* pbuffer is clients Va. */
		status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
	}
func_end:
	return status;
}
/*
* ======== node_get_attr ========
* Purpose:
* Copy the current attributes of the specified node into a dsp_nodeattr
* structure.
*/
int node_get_attr(struct node_object *hnode,
		  struct dsp_nodeattr *pattr, u32 attr_size)
{
	struct node_mgr *nmgr;

	if (hnode == NULL)
		return -EFAULT;

	nmgr = hnode->node_mgr;
	/*
	 * Take the manager lock: the fields copied below may be changed
	 * concurrently by node_change_priority() and node_connect().
	 */
	mutex_lock(&nmgr->node_mgr_lock);
	pattr->cb_struct = sizeof(struct dsp_nodeattr);
	/* Fill in the dsp_nodeattrin portion */
	pattr->in_node_attr_in.cb_struct = sizeof(struct dsp_nodeattrin);
	pattr->in_node_attr_in.prio = hnode->prio;
	pattr->in_node_attr_in.timeout = hnode->timeout;
	pattr->in_node_attr_in.heap_size =
	    hnode->create_args.asa.task_arg_obj.heap_size;
	pattr->in_node_attr_in.pgpp_virt_addr = (void *)
	    hnode->create_args.asa.task_arg_obj.gpp_heap_addr;
	pattr->node_attr_inputs = hnode->num_gpp_inputs;
	pattr->node_attr_outputs = hnode->num_gpp_outputs;
	/* Fill in the dsp_nodeinfo portion */
	get_node_info(hnode, &(pattr->node_info));
	mutex_unlock(&nmgr->node_mgr_lock);

	return 0;
}
/*
* ======== node_get_channel_id ========
* Purpose:
* Get the channel index reserved for a stream connection between the
* host and a node.
*/
int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
			u32 *chan_id)
{
	enum node_type node_type;
	struct stream_chnl *chnl = NULL;

	if (!hnode)
		return -EFAULT;

	/* Only task and XDAIS socket nodes own stream channels */
	node_type = node_get_type(hnode);
	if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
		return -EPERM;

	/* Select the endpoint array by direction, bounds-checked */
	if (dir == DSP_TONODE) {
		if (index < MAX_INPUTS(hnode))
			chnl = &hnode->inputs[index];
	} else {
		if (index < MAX_OUTPUTS(hnode))
			chnl = &hnode->outputs[index];
	}

	/* Only host connections carry a reserved channel id */
	if (chnl != NULL && chnl->type == HOSTCONNECT) {
		*chan_id = chnl->dev_id;
		return 0;
	}

	return -EINVAL;
}
/*
* ======== node_get_message ========
* Purpose:
* Retrieve a message from a node on the DSP.
*/
int node_get_message(struct node_object *hnode,
		     struct dsp_msg *message, u32 utimeout)
{
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	void *tmp_buf;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;
	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to get the
	   message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	hnode_mgr = hnode->node_mgr;
	node_type = node_get_type(hnode);
	/* Only message, task, and XDAIS socket nodes can receive messages */
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET) {
		status = -EPERM;
		goto func_end;
	}
	/* This function will block unless a message is available. Since
	 * DSPNode_RegisterNotify() allows notification when a message
	 * is available, the system can be designed so that
	 * DSPNode_GetMessage() is only called when a message is
	 * available. */
	intf_fxns = hnode_mgr->intf_fxns;
	status =
	    (*intf_fxns->msg_get) (hnode->msg_queue_obj, message, utimeout);
	/* Check if message contains SM descriptor */
	if (status || !(message->cmd & DSP_RMSBUFDESC))
		goto func_end;
	/* Translate DSP byte addr to GPP Va. (arg1 is a DSP word address,
	 * hence the multiply by the DSP word size) */
	tmp_buf = cmm_xlator_translate(hnode->xlator,
				       (void *)(message->arg1 *
						hnode->node_mgr->
						dsp_word_size), CMM_DSPPA2PA);
	if (tmp_buf != NULL) {
		/* now convert this GPP Pa to Va */
		tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
					       CMM_PA2VA);
		if (tmp_buf != NULL) {
			/* Adjust SM size in msg */
			message->arg1 = (u32) tmp_buf;
			message->arg2 *= hnode->node_mgr->dsp_word_size;
		} else {
			status = -ESRCH;
		}
	} else {
		status = -ESRCH;
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
		hnode, message, utimeout);
	return status;
}
/*
* ======== node_get_nldr_obj ========
*/
int node_get_nldr_obj(struct node_mgr *hnode_mgr,
struct nldr_object **nldr_ovlyobj)
{
int status = 0;
struct node_mgr *node_mgr_obj = hnode_mgr;
if (!hnode_mgr)
status = -EFAULT;
else
*nldr_ovlyobj = node_mgr_obj->nldr_obj;
return status;
}
/*
* ======== node_get_strm_mgr ========
* Purpose:
* Returns the Stream manager.
*/
int node_get_strm_mgr(struct node_object *hnode,
struct strm_mgr **strm_man)
{
int status = 0;
if (!hnode)
status = -EFAULT;
else
*strm_man = hnode->node_mgr->strm_mgr_obj;
return status;
}
/*
* ======== node_get_load_type ========
*/
enum nldr_loadtype node_get_load_type(struct node_object *hnode)
{
	/* Load type comes from the node's DCD properties */
	if (hnode)
		return hnode->dcd_props.obj_data.node_obj.load_type;

	dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
	return -1;
}
/*
* ======== node_get_timeout ========
* Purpose:
* Returns the timeout value for this node.
*/
u32 node_get_timeout(struct node_object *hnode)
{
	/* A NULL handle yields a timeout of 0 */
	if (hnode)
		return hnode->timeout;

	dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
	return 0;
}
/*
* ======== node_get_type ========
* Purpose:
* Returns the node type.
*/
enum node_type node_get_type(struct node_object *hnode)
{
	/* The GPP pseudo-node handle maps to NODE_GPP */
	if (hnode == (struct node_object *)DSP_HGPPNODE)
		return NODE_GPP;
	/* NULL handle yields an invalid type marker */
	if (!hnode)
		return -1;

	return hnode->ntype;
}
/*
* ======== node_on_exit ========
* Purpose:
* Gets called when RMS_EXIT is received for a node.
*/
void node_on_exit(struct node_object *hnode, s32 node_status)
{
	if (!hnode)
		return;

	/* Mark the node as finished and record its exit code */
	NODE_SET_STATE(hnode, NODE_DONE);
	hnode->exit_status = node_status;

	/* Unload the execute-phase code if it is still resident */
	if (hnode->loaded && hnode->phase_split) {
		(void)hnode->node_mgr->nldr_fxns.unload(hnode->
							nldr_node_obj,
							NLDR_EXECUTE);
		hnode->loaded = false;
	}
	/* Unblock call to node_terminate */
	(void)sync_set_event(hnode->sync_done);
	/* Notify clients */
	proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
	ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
}
/*
* ======== node_pause ========
* Purpose:
* Suspend execution of a node currently running on the DSP.
*/
/*
 * Suspend execution of a node currently running on the DSP by asking RMS
 * to drop it to NODE_SUSPENDEDPRI.
 *
 * Returns 0 on success, -EFAULT for a NULL handle, -EPERM if the node
 * type or processor state does not permit pausing, -EBADR if the node is
 * not running, -ENOSYS on the IVA unit, or the error from the underlying
 * proc/disp calls.
 */
int node_pause(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	enum node_type node_type;
	enum node_state state;
	struct node_mgr *hnode_mgr;
	int status = 0;
	u32 proc_id;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
	} else {
		/* Only task and XDAIS socket nodes can be paused. */
		node_type = node_get_type(hnode);
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
	}
	if (status)
		goto func_end;

	status = proc_get_processor_id(pnode->processor, &proc_id);
	/* Bail out before inspecting proc_id: it is uninitialized when
	 * proc_get_processor_id() fails. */
	if (status)
		goto func_end;

	if (proc_id == IVA_UNIT)
		status = -ENOSYS;

	if (!status) {
		hnode_mgr = hnode->node_mgr;

		/* Enter critical section */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		/* Check node state: only a running node can be paused. */
		if (state != NODE_RUNNING)
			status = -EBADR;

		if (status)
			goto func_cont;
		hprocessor = hnode->processor;
		status = proc_get_state(hprocessor, &proc_state,
					sizeof(struct dsp_processorstate));
		if (status)
			goto func_cont;
		/* If processor is in error state then don't attempt
		   to send the message */
		if (proc_state.proc_state == PROC_ERROR) {
			status = -EPERM;
			goto func_cont;
		}

		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
			hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY],
			hnode->node_env, NODE_SUSPENDEDPRI);

		/* Update state */
		if (status >= 0)
			NODE_SET_STATE(hnode, NODE_PAUSED);

func_cont:
		/* End of sync_enter_cs */
		/* Leave critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
		if (status >= 0) {
			proc_notify_clients(hnode->processor,
					    DSP_NODESTATECHANGE);
			ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
		}
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
	return status;
}
/*
* ======== node_put_message ========
* Purpose:
* Send a message to a message node, task node, or XDAIS socket node. This
* function will block until the message stream can accommodate the
* message, or a timeout occurs.
*/
int node_put_message(struct node_object *hnode,
		     const struct dsp_msg *pmsg, u32 utimeout)
{
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	int status = 0;
	void *tmp_buf;
	struct dsp_msg new_msg;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in bad state then don't attempt sending the
	   message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	hnode_mgr = hnode->node_mgr;
	/* Only message, task and XDAIS socket nodes accept messages. */
	node_type = node_get_type(hnode);
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET)
		status = -EPERM;

	if (!status) {
		/* Check node state. Can't send messages to a node after
		 * we've sent the RMS_EXIT command. There is still the
		 * possibility that node_terminate can be called after we've
		 * checked the state. Could add another SYNC object to
		 * prevent this (can't use node_mgr_lock, since we don't
		 * want to block other NODE functions). However, the node may
		 * still exit on its own, before this message is sent. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state == NODE_TERMINATING || state == NODE_DONE)
			status = -EBADR;

		/* end of sync_enter_cs */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (status)
		goto func_end;

	/* assign pmsg values to new msg */
	new_msg = *pmsg;
	/* Now, check if message contains a SM buffer descriptor */
	if (pmsg->cmd & DSP_RMSBUFDESC) {
		/* Translate GPP Va to DSP physical buf Ptr. */
		tmp_buf = cmm_xlator_translate(hnode->xlator,
					       (void *)new_msg.arg1,
					       CMM_VA2DSPPA);
		if (tmp_buf != NULL) {
			/* got translation, convert to MAUs in msg */
			if (hnode->node_mgr->dsp_word_size != 0) {
				/* Scale both address and length from bytes
				 * down to DSP minimum addressable units. */
				new_msg.arg1 =
				    (u32) tmp_buf /
				    hnode->node_mgr->dsp_word_size;
				/* MAUs */
				new_msg.arg2 /= hnode->node_mgr->
				    dsp_word_size;
			} else {
				pr_err("%s: dsp_word_size is zero!\n",
				       __func__);
				status = -EPERM;	/* bad DSPWordSize */
			}
		} else {	/* failed to translate buffer address */
			status = -ESRCH;
		}
	}
	if (!status) {
		/* Queue the (possibly translated) message; this may block
		 * up to utimeout waiting for queue space. */
		intf_fxns = hnode_mgr->intf_fxns;
		status = (*intf_fxns->msg_put) (hnode->msg_queue_obj,
						&new_msg, utimeout);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
		"status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
	return status;
}
/*
* ======== node_register_notify ========
* Purpose:
* Register to be notified on specific events for this node.
*/
int node_register_notify(struct node_object *hnode, u32 event_mask,
			 u32 notify_type,
			 struct dsp_notification *hnotification)
{
	struct bridge_drv_interface *intf_fxns;
	int status = 0;

	/* Validate the handle and the requested event/notify combination. */
	if (!hnode)
		status = -EFAULT;
	else if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
		/* Event mask must only contain node related events. */
		status = -EINVAL;
	else if (notify_type != DSP_SIGNALEVENT)
		status = -EINVAL;
	else if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
		/* Only one notification may be registered at a time. */
		status = -EINVAL;

	if (!status) {
		if (event_mask == DSP_NODESTATECHANGE) {
			/* State changes are handled by the ntfy object. */
			status = ntfy_register(hnode->ntfy_obj, hnotification,
					       event_mask & DSP_NODESTATECHANGE,
					       notify_type);
		} else {
			/* Send Message part of event mask to msg_ctrl */
			intf_fxns = hnode->node_mgr->intf_fxns;
			status = (*intf_fxns->msg_register_notify)
			    (hnode->msg_queue_obj,
			     event_mask & DSP_NODEMESSAGEREADY, notify_type,
			     hnotification);
		}
	}
	dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
		"hnotification: %p status 0x%x\n", __func__, hnode,
		event_mask, notify_type, hnotification, status);
	return status;
}
/*
* ======== node_run ========
* Purpose:
* Start execution of a node's execute phase, or resume execution of a node
* that has been suspended (via NODE_NodePause()) on the DSP. Load the
* node's execute function if necessary.
*/
int node_run(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	enum node_state state;
	u32 ul_execute_fxn;
	u32 ul_fxn_addr;
	int status = 0;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to run the node */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* Device nodes have no execute phase and therefore cannot be run. */
	node_type = node_get_type(hnode);
	if (node_type == NODE_DEVICE)
		status = -EPERM;
	if (status)
		goto func_end;

	hnode_mgr = hnode->node_mgr;
	if (!hnode_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Only a freshly created node or a paused one may be (re)started. */
	state = node_get_state(hnode);
	if (state != NODE_CREATED && state != NODE_PAUSED)
		status = -EBADR;

	if (!status)
		status = proc_get_processor_id(pnode->processor, &proc_id);

	if (status)
		goto func_cont1;

	if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
		goto func_cont1;

	if (state == NODE_CREATED) {
		/* If node's execute function is not loaded, load it */
		if (!(hnode->loaded) && hnode->phase_split) {
			status =
			    hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
						      NLDR_EXECUTE);
			if (!status) {
				hnode->loaded = true;
			} else {
				pr_err("%s: fail - load execute code: 0x%x\n",
				       __func__, status);
			}
		}
		if (!status) {
			/* Get address of node's execute function */
			if (proc_id == IVA_UNIT)
				ul_execute_fxn = (u32) hnode->node_env;
			else {
				status = get_fxn_address(hnode, &ul_execute_fxn,
							 EXECUTEPHASE);
			}
		}
		if (!status) {
			/* Dispatch the RMS "execute node" command. */
			ul_fxn_addr = hnode_mgr->fxn_addrs[RMSEXECUTENODE];
			status =
			    disp_node_run(hnode_mgr->disp_obj, hnode,
					  ul_fxn_addr, ul_execute_fxn,
					  hnode->node_env);
		}
	} else if (state == NODE_PAUSED) {
		/* Resume a paused node by restoring its saved priority. */
		ul_fxn_addr = hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY];
		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
						   ul_fxn_addr, hnode->node_env,
						   NODE_GET_PRIORITY(hnode));
	} else {
		/* We should never get here */
	}
func_cont1:
	/* Update node state. */
	if (status >= 0)
		NODE_SET_STATE(hnode, NODE_RUNNING);
	else		/* Set state back to previous value */
		NODE_SET_STATE(hnode, state);
	/*End of sync_enter_cs */
	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (status >= 0) {
		proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
	return status;
}
/*
* ======== node_terminate ========
* Purpose:
* Signal a node running on the DSP that it should exit its execute phase
* function.
*/
int node_terminate(struct node_object *hnode, int *pstatus)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	struct dsp_msg msg, killmsg;
	int status = 0;
	u32 proc_id, kill_time_out;
	struct deh_mgr *hdeh_mgr;
	struct dsp_processorstate proc_state;

	if (!hnode || !hnode->node_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	if (pnode->processor == NULL) {
		status = -EFAULT;
		goto func_end;
	}
	status = proc_get_processor_id(pnode->processor, &proc_id);

	if (!status) {
		hnode_mgr = hnode->node_mgr;
		node_type = node_get_type(hnode);
		/* Only task and XDAIS socket nodes can be terminated. */
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
	}
	if (!status) {
		/* Check node state */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state != NODE_RUNNING) {
			status = -EBADR;
			/* Set the exit status if node terminated on
			 * its own. */
			if (state == NODE_DONE)
				*pstatus = hnode->exit_status;

		} else {
			NODE_SET_STATE(hnode, NODE_TERMINATING);
		}
		/* end of sync_enter_cs */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (!status) {
		/*
		 * Send exit message. Do not change state to NODE_DONE
		 * here. That will be done in callback.
		 */
		status = proc_get_state(pnode->processor, &proc_state,
					sizeof(struct dsp_processorstate));
		if (status)
			goto func_cont;
		/* If processor is in error state then don't attempt to send
		 * A kill task command */
		if (proc_state.proc_state == PROC_ERROR) {
			status = -EPERM;
			goto func_cont;
		}

		/* Prepare both the polite RMS_EXIT request and the forcible
		 * RMS_KILLTASK fallback. */
		msg.cmd = RMS_EXIT;
		msg.arg1 = hnode->node_env;
		killmsg.cmd = RMS_KILLTASK;
		killmsg.arg1 = hnode->node_env;
		intf_fxns = hnode_mgr->intf_fxns;

		/* Cap the kill timeout; otherwise wait twice the node
		 * timeout (split in half across the two waits below). */
		if (hnode->timeout > MAXTIMEOUT)
			kill_time_out = MAXTIMEOUT;
		else
			kill_time_out = (hnode->timeout) * 2;

		status = (*intf_fxns->msg_put) (hnode->msg_queue_obj, &msg,
						hnode->timeout);

		if (status)
			goto func_cont;

		/*
		 * Wait on synchronization object that will be
		 * posted in the callback on receiving RMS_EXIT
		 * message, or by node_delete. Check for valid hnode,
		 * in case posted by node_delete().
		 */
		status = sync_wait_on_event(hnode->sync_done,
					    kill_time_out / 2);
		/* NOTE(review): this compares against positive ETIME; if
		 * sync_wait_on_event() returns negative errno values this
		 * branch would never retry with the kill message — confirm
		 * the wait API's return convention. */
		if (status != ETIME)
			goto func_cont;

		status = (*intf_fxns->msg_put)(hnode->msg_queue_obj,
					       &killmsg, hnode->timeout);
		if (status)
			goto func_cont;
		status = sync_wait_on_event(hnode->sync_done,
					    kill_time_out / 2);
		if (status) {
			/*
			 * Here it goes the part of the simulation of
			 * the DSP exception.
			 */
			dev_get_deh_mgr(hnode_mgr->dev_obj, &hdeh_mgr);
			if (!hdeh_mgr)
				goto func_cont;

			bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
		}
	}
func_cont:
	if (!status) {
		/* Enter CS before getting exit status, in case node was
		 * deleted. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		/* Make sure node wasn't deleted while we blocked */
		/* NOTE(review): hnode is a local copy of the caller's
		 * pointer and cannot become NULL here; this re-check can
		 * never fire as written. */
		if (!hnode) {
			status = -EPERM;
		} else {
			*pstatus = hnode->exit_status;
			dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
				__func__, hnode, hnode->node_env, status);
		}
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}			/*End of sync_enter_cs */
func_end:
	return status;
}
/*
* ======== delete_node ========
* Purpose:
* Free GPP resources allocated in node_allocate() or node_connect().
*/
static void delete_node(struct node_object *hnode,
			struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct bridge_drv_interface *intf_fxns;
	u32 i;
	enum node_type node_type;
	struct stream_chnl stream;
	struct node_msgargs node_msg_args;
	struct node_taskargs task_arg_obj;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object =
	    (struct proc_object *)hnode->processor;
#endif
	int status;
	if (!hnode)
		goto func_end;
	hnode_mgr = hnode->node_mgr;
	if (!hnode_mgr)
		goto func_end;

	node_type = node_get_type(hnode);
	/* Device nodes own no message queue or stream resources. */
	if (node_type != NODE_DEVICE) {
		node_msg_args = hnode->create_args.asa.node_msg_args;
		kfree(node_msg_args.pdata);

		/* Free msg_ctrl queue */
		if (hnode->msg_queue_obj) {
			intf_fxns = hnode_mgr->intf_fxns;
			(*intf_fxns->msg_delete_queue) (hnode->
							msg_queue_obj);
			hnode->msg_queue_obj = NULL;
		}

		kfree(hnode->sync_done);

		/* Free all stream info */
		if (hnode->inputs) {
			/* Release the pipe/channel behind each input. */
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				stream = hnode->inputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->inputs);
			hnode->inputs = NULL;
		}
		if (hnode->outputs) {
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				stream = hnode->outputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->outputs);
			hnode->outputs = NULL;
		}
		task_arg_obj = hnode->create_args.asa.task_arg_obj;
		if (task_arg_obj.strm_in_def) {
			/* Free each per-stream device-name string before
			 * freeing the definition array itself. */
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_in_def[i].sz_device);
				task_arg_obj.strm_in_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_in_def);
			task_arg_obj.strm_in_def = NULL;
		}
		if (task_arg_obj.strm_out_def) {
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_out_def[i].sz_device);
				task_arg_obj.strm_out_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_out_def);
			task_arg_obj.strm_out_def = NULL;
		}
		if (task_arg_obj.dsp_heap_res_addr) {
			/* Unmap then un-reserve the node's DSP heap; the
			 * statuses are intentionally not propagated since
			 * teardown must continue regardless. */
			status = proc_un_map(hnode->processor, (void *)
					     task_arg_obj.dsp_heap_addr,
					     pr_ctxt);

			status = proc_un_reserve_memory(hnode->processor,
							(void *)
							task_arg_obj.
							dsp_heap_res_addr,
							pr_ctxt);
#ifdef DSP_DMM_DEBUG
			status = dmm_get_handle(p_proc_object, &dmm_mgr);
			if (dmm_mgr)
				dmm_mem_map_dump(dmm_mgr);
			else
				status = DSP_EHANDLE;
#endif
		}
	}
	if (node_type != NODE_MESSAGE) {
		kfree(hnode->stream_connect);
		hnode->stream_connect = NULL;
	}
	kfree(hnode->str_dev_name);
	hnode->str_dev_name = NULL;

	if (hnode->ntfy_obj) {
		ntfy_delete(hnode->ntfy_obj);
		kfree(hnode->ntfy_obj);
		hnode->ntfy_obj = NULL;
	}

	/* These were allocated in dcd_get_object_def (via node_allocate) */
	kfree(hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.str_i_alg_name);
	hnode->dcd_props.obj_data.node_obj.str_i_alg_name = NULL;

	/* Free all SM address translator resources */
	kfree(hnode->xlator);
	kfree(hnode->nldr_node_obj);
	hnode->nldr_node_obj = NULL;
	hnode->node_mgr = NULL;
	kfree(hnode);
	hnode = NULL;
func_end:
	return;
}
/*
* ======== delete_node_mgr ========
* Purpose:
* Frees the node manager.
*/
static void delete_node_mgr(struct node_mgr *hnode_mgr)
{
	struct node_object *hnode, *tmp;

	if (!hnode_mgr)
		return;

	/* Free resources */
	if (hnode_mgr->dcd_mgr)
		dcd_destroy_manager(hnode_mgr->dcd_mgr);

	/* Remove any elements remaining in lists */
	list_for_each_entry_safe(hnode, tmp, &hnode_mgr->node_list,
				 list_elem) {
		list_del(&hnode->list_elem);
		delete_node(hnode, NULL);
	}
	mutex_destroy(&hnode_mgr->node_mgr_lock);
	if (hnode_mgr->ntfy_obj) {
		ntfy_delete(hnode_mgr->ntfy_obj);
		kfree(hnode_mgr->ntfy_obj);
	}
	if (hnode_mgr->disp_obj)
		disp_delete(hnode_mgr->disp_obj);
	if (hnode_mgr->strm_mgr_obj)
		strm_delete(hnode_mgr->strm_mgr_obj);
	/* Delete the loader */
	if (hnode_mgr->nldr_obj)
		hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj);
	kfree(hnode_mgr);
}
/*
* ======== fill_stream_connect ========
* Purpose:
* Fills stream information.
*/
static void fill_stream_connect(struct node_object *node1,
				struct node_object *node2,
				u32 stream1, u32 stream2)
{
	u32 strm_index;
	struct dsp_streamconnect *strm1 = NULL;
	struct dsp_streamconnect *strm2 = NULL;
	enum node_type node1_type = NODE_TASK;
	enum node_type node2_type = NODE_TASK;

	node1_type = node_get_type(node1);
	node2_type = node_get_type(node2);
	/* NOTE(review): strm_index assumes the caller has already bumped
	 * num_inputs/num_outputs for this connection so that the slot
	 * (num_inputs + num_outputs - 1) is in range — verify at call site. */
	if (node1 != (struct node_object *)DSP_HGPPNODE) {

		if (node1_type != NODE_DEVICE) {
			strm_index = node1->num_inputs +
			    node1->num_outputs - 1;
			strm1 = &(node1->stream_connect[strm_index]);
			strm1->cb_struct = sizeof(struct dsp_streamconnect);
			strm1->this_node_stream_index = stream1;
		}

		if (node2 != (struct node_object *)DSP_HGPPNODE) {
			/* NODE == > NODE */
			if (node1_type != NODE_DEVICE) {
				strm1->connected_node = node2;
				strm1->ui_connected_node_id = node2->node_uuid;
				strm1->connected_node_stream_index = stream2;
				strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
			}
			if (node2_type != NODE_DEVICE) {
				strm_index = node2->num_inputs +
				    node2->num_outputs - 1;
				strm2 = &(node2->stream_connect[strm_index]);
				strm2->cb_struct =
				    sizeof(struct dsp_streamconnect);
				strm2->this_node_stream_index = stream2;
				strm2->connected_node = node1;
				strm2->ui_connected_node_id = node1->node_uuid;
				strm2->connected_node_stream_index = stream1;
				strm2->connect_type = CONNECTTYPE_NODEINPUT;
			}
		} else if (node1_type != NODE_DEVICE)
			/* NODE == > GPP */
			strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
	} else {
		/* GPP == > NODE */
		strm_index = node2->num_inputs + node2->num_outputs - 1;
		strm2 = &(node2->stream_connect[strm_index]);
		strm2->cb_struct = sizeof(struct dsp_streamconnect);
		strm2->this_node_stream_index = stream2;
		strm2->connect_type = CONNECTTYPE_GPPINPUT;
	}
}
/*
* ======== fill_stream_def ========
* Purpose:
* Fills Stream attributes.
*/
static void fill_stream_def(struct node_object *hnode,
			    struct node_strmdef *pstrm_def,
			    struct dsp_strmattr *pattrs)
{
	struct node_mgr *hnode_mgr = hnode->node_mgr;

	/* Without caller-supplied attributes, fall back to the defaults;
	 * buffer sizes are expressed in DSP data MAUs in either case. */
	if (pattrs == NULL) {
		pstrm_def->num_bufs = DEFAULTNBUFS;
		pstrm_def->buf_size =
		    DEFAULTBUFSIZE / hnode_mgr->dsp_data_mau_size;
		pstrm_def->seg_id = DEFAULTSEGID;
		pstrm_def->buf_alignment = DEFAULTALIGNMENT;
		pstrm_def->timeout = DEFAULTTIMEOUT;
		return;
	}

	pstrm_def->num_bufs = pattrs->num_bufs;
	pstrm_def->buf_size =
	    pattrs->buf_size / hnode_mgr->dsp_data_mau_size;
	pstrm_def->seg_id = pattrs->seg_id;
	pstrm_def->buf_alignment = pattrs->buf_alignment;
	pstrm_def->timeout = pattrs->timeout;
}
/*
* ======== free_stream ========
* Purpose:
* Updates the channel mask and frees the pipe id.
*/
static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
{
/* Free up the pipe id unless other node has not yet been deleted. */
if (stream.type == NODECONNECT) {
if (test_bit(stream.dev_id, hnode_mgr->pipe_done_map)) {
/* The other node has already been deleted */
clear_bit(stream.dev_id, hnode_mgr->pipe_done_map);
clear_bit(stream.dev_id, hnode_mgr->pipe_map);
} else {
/* The other node has not been deleted yet */
set_bit(stream.dev_id, hnode_mgr->pipe_done_map);
}
} else if (stream.type == HOSTCONNECT) {
if (stream.dev_id < hnode_mgr->num_chnls) {
clear_bit(stream.dev_id, hnode_mgr->chnl_map);
} else if (stream.dev_id < (2 * hnode_mgr->num_chnls)) {
/* dsp-dma */
clear_bit(stream.dev_id - (1 * hnode_mgr->num_chnls),
hnode_mgr->dma_chnl_map);
} else if (stream.dev_id < (3 * hnode_mgr->num_chnls)) {
/* zero-copy */
clear_bit(stream.dev_id - (2 * hnode_mgr->num_chnls),
hnode_mgr->zc_chnl_map);
}
}
}
/*
* ======== get_fxn_address ========
* Purpose:
* Retrieves the address for create, execute or delete phase for a node.
*/
/*
 * Retrieve the DSP address of a node's create, execute or delete phase
 * function by resolving the phase's symbol name via the node loader.
 *
 * Returns 0 on success, -EINVAL for an unknown phase, or the loader's
 * error code.
 */
static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
			   u32 phase)
{
	char *pstr_fxn_name = NULL;
	struct node_mgr *hnode_mgr = hnode->node_mgr;

	switch (phase) {
	case CREATEPHASE:
		pstr_fxn_name =
		    hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn;
		break;
	case EXECUTEPHASE:
		pstr_fxn_name =
		    hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn;
		break;
	case DELETEPHASE:
		pstr_fxn_name =
		    hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn;
		break;
	default:
		/* Unknown phase: fail early instead of handing the loader a
		 * NULL symbol name (previously fell through "should never
		 * get here" with pstr_fxn_name == NULL). */
		return -EINVAL;
	}

	return hnode_mgr->nldr_fxns.get_fxn_addr(hnode->nldr_node_obj,
						 pstr_fxn_name, fxn_addr);
}
/*
* ======== get_node_info ========
* Purpose:
* Retrieves the node information.
*/
void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
{
	u32 nstreams = hnode->num_inputs + hnode->num_outputs;
	u32 idx;

	/* Snapshot the node's static properties and live state. */
	node_info->cb_struct = sizeof(struct dsp_nodeinfo);
	node_info->nb_node_database_props =
	    hnode->dcd_props.obj_data.node_obj.ndb_props;
	node_info->execution_priority = hnode->prio;
	node_info->device_owner = hnode->device_owner;
	node_info->number_streams = nstreams;
	node_info->node_env = hnode->node_env;
	node_info->ns_execution_state = node_get_state(hnode);

	/* Copy stream connect data */
	for (idx = 0; idx < nstreams; idx++)
		node_info->sc_stream_connection[idx] =
		    hnode->stream_connect[idx];
}
/*
* ======== get_node_props ========
* Purpose:
* Retrieve node properties.
*/
static int get_node_props(struct dcd_manager *hdcd_mgr,
			  struct node_object *hnode,
			  const struct dsp_uuid *node_uuid,
			  struct dcd_genericobj *dcd_prop)
{
	u32 len;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *task_arg_obj;
	enum node_type node_type = NODE_TASK;
	struct dsp_ndbprops *pndb_props =
	    &(dcd_prop->obj_data.node_obj.ndb_props);
	int status = 0;
	char sz_uuid[MAXUUIDLEN];

	status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
				    DSP_DCDNODETYPE, dcd_prop);

	if (!status) {
		hnode->ntype = node_type = pndb_props->ntype;

		/* Create UUID value to set in registry. */
		snprintf(sz_uuid, MAXUUIDLEN, "%pUL", node_uuid);
		dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);

		/* Fill in message args that come from NDB */
		if (node_type != NODE_DEVICE) {
			pmsg_args = &(hnode->create_args.asa.node_msg_args);
			pmsg_args->seg_id =
			    dcd_prop->obj_data.node_obj.msg_segid;
			pmsg_args->notify_type =
			    dcd_prop->obj_data.node_obj.msg_notify_type;
			pmsg_args->max_msgs = pndb_props->message_depth;
			dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
				pmsg_args->max_msgs);
		} else {
			/* Copy device name */
			len = strlen(pndb_props->ac_name);
			hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL);
			if (hnode->str_dev_name == NULL) {
				status = -ENOMEM;
			} else {
				/* strncpy of len bytes into a kzalloc'd
				 * len + 1 buffer: the trailing byte stays
				 * zero, so the string is terminated. */
				strncpy(hnode->str_dev_name,
					pndb_props->ac_name, len);
			}
		}
	}
	if (!status) {
		/* Fill in create args that come from NDB */
		if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
			task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
			task_arg_obj->prio = pndb_props->prio;
			task_arg_obj->stack_size = pndb_props->stack_size;
			task_arg_obj->sys_stack_size =
			    pndb_props->sys_stack_size;
			task_arg_obj->stack_seg = pndb_props->stack_seg;
			dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
				"0x%x words System Stack Size: 0x%x words "
				"Stack Segment: 0x%x profile count : 0x%x\n",
				task_arg_obj->prio, task_arg_obj->stack_size,
				task_arg_obj->sys_stack_size,
				task_arg_obj->stack_seg,
				pndb_props->count_profiles);
		}
	}

	return status;
}
/*
* ======== get_proc_props ========
* Purpose:
* Retrieve the processor properties.
*/
static int get_proc_props(struct node_mgr *hnode_mgr,
			  struct dev_object *hdev_obj)
{
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	int status;

	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
	if (!pbridge_context)
		status = -EFAULT;
	if (status)
		return status;

	host_res = pbridge_context->resources;
	if (!host_res)
		return -EPERM;

	/* Channel layout comes straight from the host resources. */
	hnode_mgr->chnl_offset = host_res->chnl_offset;
	hnode_mgr->chnl_buf_size = host_res->chnl_buf_size;
	hnode_mgr->num_chnls = host_res->num_chnls;

	/*
	 * PROC will add an API to get dsp_processorinfo.
	 * Fill in default values for now.
	 */
	/* TODO -- Instead of hard coding, take from registry */
	hnode_mgr->proc_family = 6000;
	hnode_mgr->proc_type = 6410;
	hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
	hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
	hnode_mgr->dsp_word_size = DSPWORDSIZE;
	hnode_mgr->dsp_data_mau_size = DSPWORDSIZE;
	hnode_mgr->dsp_mau_size = 1;

	return status;
}
/*
* ======== node_get_uuid_props ========
* Purpose:
* Fetch Node UUID properties from DCD/DOF file.
*/
/*
 * Fetch a node's UUID properties (dsp_ndbprops) from the DCD/DOF file.
 *
 * Returns 0 on success, -EFAULT for NULL handles or a missing device /
 * node manager, -EPERM if the processor is in the error state, or the
 * error from the underlying proc/dcd calls.
 */
int node_get_uuid_props(void *hprocessor,
			const struct dsp_uuid *node_uuid,
			struct dsp_ndbprops *node_props)
{
	struct node_mgr *hnode_mgr = NULL;
	struct dev_object *hdev_obj;
	int status = 0;
	struct dcd_nodeprops dcd_node_props;
	struct dsp_processorstate proc_state;

	if (hprocessor == NULL || node_uuid == NULL) {
		status = -EFAULT;
		goto func_end;
	}
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt
	   to send the message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}

	status = proc_get_dev_object(hprocessor, &hdev_obj);
	/* Bail out when no device object is available: previously a NULL
	 * hdev_obj left hnode_mgr NULL and the mutex_lock below would
	 * dereference a NULL pointer. */
	if (status || !hdev_obj) {
		if (!status)
			status = -EFAULT;
		goto func_end;
	}
	status = dev_get_node_manager(hdev_obj, &hnode_mgr);
	if (hnode_mgr == NULL) {
		status = -EFAULT;
		goto func_end;
	}

	/*
	 * Enter the critical section. This is needed because
	 * dcd_get_object_def will ultimately end up calling dbll_open/close,
	 * which needs to be protected in order to not corrupt the zlib manager
	 * (COD).
	 */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	dcd_node_props.str_create_phase_fxn = NULL;
	dcd_node_props.str_execute_phase_fxn = NULL;
	dcd_node_props.str_delete_phase_fxn = NULL;
	dcd_node_props.str_i_alg_name = NULL;

	status = dcd_get_object_def(hnode_mgr->dcd_mgr,
		(struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
		(struct dcd_genericobj *)&dcd_node_props);

	if (!status) {
		*node_props = dcd_node_props.ndb_props;
		/* The phase-function name strings were allocated by the DCD
		 * layer; only the ndb_props copy is kept. */
		kfree(dcd_node_props.str_create_phase_fxn);

		kfree(dcd_node_props.str_execute_phase_fxn);

		kfree(dcd_node_props.str_delete_phase_fxn);

		kfree(dcd_node_props.str_i_alg_name);
	}
	/*  Leave the critical section, we're done. */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
	return status;
}
/*
* ======== get_rms_fxns ========
* Purpose:
* Retrieve the RMS functions.
*/
static int get_rms_fxns(struct node_mgr *hnode_mgr)
{
	s32 i;
	struct dev_object *dev_obj = hnode_mgr->dev_obj;
	int status = 0;

	/* Symbol names indexed by the RMS function enum used for
	 * hnode_mgr->fxn_addrs[]. */
	static char *psz_fxns[NUMRMSFXNS] = {
		"RMS_queryServer",	/* RMSQUERYSERVER */
		"RMS_configureServer",	/* RMSCONFIGURESERVER */
		"RMS_createNode",	/* RMSCREATENODE */
		"RMS_executeNode",	/* RMSEXECUTENODE */
		"RMS_deleteNode",	/* RMSDELETENODE */
		"RMS_changeNodePriority",	/* RMSCHANGENODEPRIORITY */
		"RMS_readMemory",	/* RMSREADMEMORY */
		"RMS_writeMemory",	/* RMSWRITEMEMORY */
		"RMS_copy",	/* RMSCOPY */
	};

	for (i = 0; i < NUMRMSFXNS; i++) {
		status = dev_get_symbol(dev_obj, psz_fxns[i],
					&(hnode_mgr->fxn_addrs[i]));
		if (status) {
			if (status == -ESPIPE) {
				/*
				 *  May be loaded dynamically (in the future),
				 *  but return an error for now.
				 */
				/* NOTE(review): -ESPIPE does not break the
				 * loop; it is only returned to the caller if
				 * the *last* symbol fails this way, since a
				 * later success overwrites status. */
				dev_dbg(bridge, "%s: RMS function: %s currently"
					" not loaded\n", __func__, psz_fxns[i]);
			} else {
				dev_dbg(bridge, "%s: Symbol not found: %s "
					"status = 0x%x\n", __func__,
					psz_fxns[i], status);
				break;
			}
		}
	}

	return status;
}
/*
* ======== ovly ========
* Purpose:
* Called during overlay.Sends command to RMS to copy a block of data.
*/
static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
		u32 ul_num_bytes, u32 mem_space)
{
	struct node_object *hnode = (struct node_object *)priv_ref;
	struct node_mgr *hnode_mgr;
	u32 ul_bytes = 0;
	u32 ul_size;
	u32 ul_timeout;
	int status = 0;
	struct bridge_dev_context *hbridge_context;
	/* Function interface to Bridge driver*/
	struct bridge_drv_interface *intf_fxns;

	hnode_mgr = hnode->node_mgr;

	/* Size in DSP words; currently unused by the copy below. */
	ul_size = ul_num_bytes / hnode_mgr->dsp_word_size;
	ul_timeout = hnode->timeout;

	/* Call new MemCopy function */
	intf_fxns = hnode_mgr->intf_fxns;
	status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
	if (!status) {
		status =
		    (*intf_fxns->brd_mem_copy) (hbridge_context,
						dsp_run_addr, dsp_load_addr,
						ul_num_bytes, (u32) mem_space);
		/* Report the byte count only when the copy succeeded; the
		 * loader interprets 0 as failure. */
		if (!status)
			ul_bytes = ul_num_bytes;
		else
			pr_debug("%s: failed to copy brd memory, status 0x%x\n",
				 __func__, status);
	} else {
		pr_debug("%s: failed to get Bridge context, status 0x%x\n",
			 __func__, status);
	}

	return ul_bytes;
}
/*
* ======== mem_write ========
*/
/*
 * NLDR callback: write a buffer into DSP code or data memory.
 *
 * Returns the number of bytes written, or 0 on failure (mirroring ovly(),
 * which also reports 0 bytes when the bridge operation fails). Previously
 * the statuses of dev_get_bridge_context() and brd_mem_write() were
 * ignored and ul_num_bytes was returned unconditionally.
 */
static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
		     u32 ul_num_bytes, u32 mem_space)
{
	struct node_object *hnode = (struct node_object *)priv_ref;
	struct node_mgr *hnode_mgr;
	u16 mem_sect_type;
	int status = 0;
	struct bridge_dev_context *hbridge_context;
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;

	hnode_mgr = hnode->node_mgr;

	/* DBLL_CODE selects program memory; anything else is data. */
	mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;

	/* Call new MemWrite function */
	intf_fxns = hnode_mgr->intf_fxns;
	status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
	if (status)
		return 0;	/* no bridge context: nothing was written */

	status = (*intf_fxns->brd_mem_write) (hbridge_context, pbuf,
					dsp_add, ul_num_bytes, mem_sect_type);
	if (status)
		return 0;	/* write failed: report nothing written */

	return ul_num_bytes;
}
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
/*
* ======== node_find_addr ========
*/
int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
		   u32 offset_range, void *sym_addr_output, char *sym_name)
{
	struct node_object *node_obj;
	int status = -ENOENT;

	/* Ask each node's loader in turn; stop at the first match. */
	list_for_each_entry(node_obj, &node_mgr->node_list, list_elem) {
		status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
					offset_range, sym_addr_output,
					sym_name);
		if (status)
			continue;

		pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
			 (unsigned int) node_mgr,
			 sym_addr, offset_range,
			 (unsigned int) sym_addr_output, sym_name);
		break;
	}

	return status;
}
#endif
| gpl-2.0 |
stelios97/sony-kernel-msm7x27a | drivers/media/video/gspca/m5602/m5602_mt9m111.c | 3135 | 16512 | /*
* Driver for the mt9m111 sensor
*
* Copyright (C) 2008 Erik Andrén
* Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
* Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
*
* Portions of code to USB interface and ALi driver software,
* Copyright (c) 2006 Willem Duinker
* v4l2 interface modeled after the V4L2 driver
* for SN9C10x PC Camera Controllers
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*
*/
#include "m5602_mt9m111.h"
static int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
static int mt9m111_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
static int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
static int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
static int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
static int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val);
static int mt9m111_set_auto_white_balance(struct gspca_dev *gspca_dev,
__s32 val);
static int mt9m111_get_auto_white_balance(struct gspca_dev *gspca_dev,
__s32 *val);
static int mt9m111_get_green_balance(struct gspca_dev *gspca_dev, __s32 *val);
static int mt9m111_set_green_balance(struct gspca_dev *gspca_dev, __s32 val);
static int mt9m111_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
static int mt9m111_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
static int mt9m111_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
static int mt9m111_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
/* Pixel formats exposed by the sensor: a single 640x480 raw Bayer
 * (BGGR, 8 bits per pixel) mode. */
static struct v4l2_pix_format mt9m111_modes[] = {
	{
		640,
		480,
		V4L2_PIX_FMT_SBGGR8,
		V4L2_FIELD_NONE,
		.sizeimage = 640 * 480,
		.bytesperline = 640,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 0
	}
};
/* V4L2 controls exposed by the mt9m111 sensor. The *_IDX defines must be
 * unique array indices: each control is looked up by its index. */
static const struct ctrl mt9m111_ctrls[] = {
#define VFLIP_IDX 0
	{
		{
			.id		= V4L2_CID_VFLIP,
			.type		= V4L2_CTRL_TYPE_BOOLEAN,
			.name		= "vertical flip",
			.minimum	= 0,
			.maximum	= 1,
			.step		= 1,
			.default_value	= 0
		},
		.set = mt9m111_set_vflip,
		.get = mt9m111_get_vflip
	},
#define HFLIP_IDX 1
	{
		{
			.id		= V4L2_CID_HFLIP,
			.type		= V4L2_CTRL_TYPE_BOOLEAN,
			.name		= "horizontal flip",
			.minimum	= 0,
			.maximum	= 1,
			.step		= 1,
			.default_value	= 0
		},
		.set = mt9m111_set_hflip,
		.get = mt9m111_get_hflip
	},
#define GAIN_IDX 2
	{
		{
			.id		= V4L2_CID_GAIN,
			.type		= V4L2_CTRL_TYPE_INTEGER,
			.name		= "gain",
			.minimum	= 0,
			.maximum	= (INITIAL_MAX_GAIN - 1) * 2 * 2 * 2,
			.step		= 1,
			.default_value	= MT9M111_DEFAULT_GAIN,
			.flags		= V4L2_CTRL_FLAG_SLIDER
		},
		.set = mt9m111_set_gain,
		.get = mt9m111_get_gain
	},
#define AUTO_WHITE_BALANCE_IDX 3
	{
		{
			.id		= V4L2_CID_AUTO_WHITE_BALANCE,
			.type		= V4L2_CTRL_TYPE_BOOLEAN,
			.name		= "auto white balance",
			.minimum	= 0,
			.maximum	= 1,
			.step		= 1,
			.default_value	= 0,
		},
		.set = mt9m111_set_auto_white_balance,
		.get = mt9m111_get_auto_white_balance
	},
#define GREEN_BALANCE_IDX 4
	{
		{
			.id		= M5602_V4L2_CID_GREEN_BALANCE,
			.type		= V4L2_CTRL_TYPE_INTEGER,
			.name		= "green balance",
			.minimum	= 0x00,
			.maximum	= 0x7ff,
			.step		= 0x1,
			.default_value	= MT9M111_GREEN_GAIN_DEFAULT,
			.flags		= V4L2_CTRL_FLAG_SLIDER
		},
		.set = mt9m111_set_green_balance,
		.get = mt9m111_get_green_balance
	},
#define BLUE_BALANCE_IDX 5
	{
		{
			.id		= V4L2_CID_BLUE_BALANCE,
			.type		= V4L2_CTRL_TYPE_INTEGER,
			.name		= "blue balance",
			.minimum	= 0x00,
			.maximum	= 0x7ff,
			.step		= 0x1,
			.default_value	= MT9M111_BLUE_GAIN_DEFAULT,
			.flags		= V4L2_CTRL_FLAG_SLIDER
		},
		.set = mt9m111_set_blue_balance,
		.get = mt9m111_get_blue_balance
	},
/* Fix: was erroneously 5, aliasing BLUE_BALANCE_IDX, so red-balance
 * lookups hit the blue-balance control entry. */
#define RED_BALANCE_IDX 6
	{
		{
			.id		= V4L2_CID_RED_BALANCE,
			.type		= V4L2_CTRL_TYPE_INTEGER,
			.name		= "red balance",
			.minimum	= 0x00,
			.maximum	= 0x7ff,
			.step		= 0x1,
			.default_value	= MT9M111_RED_GAIN_DEFAULT,
			.flags		= V4L2_CTRL_FLAG_SLIDER
		},
		.set = mt9m111_set_red_balance,
		.get = mt9m111_get_red_balance
	},
};
/* Debug aid: dumps all sensor registers from the three register pages */
static void mt9m111_dump_registers(struct sd *sd);
/* Probe for an mt9m111 sensor behind the m5602 bridge.
 * On success attaches the mode table and control table to the gspca device,
 * allocates the per-control settings cache and returns 0.
 * Returns -ENODEV when the sensor is absent (or another sensor is forced)
 * and -ENOMEM on allocation failure.
 */
int mt9m111_probe(struct sd *sd)
{
	u8 data[2] = {0x00, 0x00};
	int i;
	s32 *sensor_settings;

	if (force_sensor) {
		if (force_sensor == MT9M111_SENSOR) {
			info("Forcing a %s sensor", mt9m111.name);
			goto sensor_found;
		}
		/* If we want to force another sensor, don't try to probe this
		 * one */
		return -ENODEV;
	}

	PDEBUG(D_PROBE, "Probing for a mt9m111 sensor");

	/* Do the preinit (errors in this best-effort sequence are ignored) */
	for (i = 0; i < ARRAY_SIZE(preinit_mt9m111); i++) {
		if (preinit_mt9m111[i][0] == BRIDGE) {
			m5602_write_bridge(sd,
				preinit_mt9m111[i][1],
				preinit_mt9m111[i][2]);
		} else {
			data[0] = preinit_mt9m111[i][2];
			data[1] = preinit_mt9m111[i][3];
			m5602_write_sensor(sd,
				preinit_mt9m111[i][1], data, 2);
		}
	}

	/* Identify the chip: the version register must read 0x143a */
	if (m5602_read_sensor(sd, MT9M111_SC_CHIPVER, data, 2))
		return -ENODEV;

	if ((data[0] == 0x14) && (data[1] == 0x3a)) {
		info("Detected a mt9m111 sensor");
		goto sensor_found;
	}

	return -ENODEV;

sensor_found:
	/* One cached s32 per control; freed in mt9m111_disconnect() */
	sensor_settings = kmalloc(ARRAY_SIZE(mt9m111_ctrls) * sizeof(s32),
				  GFP_KERNEL);
	if (!sensor_settings)
		return -ENOMEM;

	sd->gspca_dev.cam.cam_mode = mt9m111_modes;
	sd->gspca_dev.cam.nmodes = ARRAY_SIZE(mt9m111_modes);
	sd->desc->ctrls = mt9m111_ctrls;
	sd->desc->nctrls = ARRAY_SIZE(mt9m111_ctrls);

	/* Seed the cache with each control's default value */
	for (i = 0; i < ARRAY_SIZE(mt9m111_ctrls); i++)
		sensor_settings[i] = mt9m111_ctrls[i].qctrl.default_value;
	sd->sensor_priv = sensor_settings;

	return 0;
}
/* Initialize the sensor: run the init register sequence, optionally dump
 * the register state, then re-apply the cached control settings.
 * Returns 0 on success or a negative error code.
 */
int mt9m111_init(struct sd *sd)
{
	int i, err = 0;
	s32 *sensor_settings = sd->sensor_priv;

	/* Init the sensor */
	for (i = 0; i < ARRAY_SIZE(init_mt9m111) && !err; i++) {
		u8 data[2];

		if (init_mt9m111[i][0] == BRIDGE) {
			err = m5602_write_bridge(sd,
				init_mt9m111[i][1],
				init_mt9m111[i][2]);
		} else {
			data[0] = init_mt9m111[i][2];
			data[1] = init_mt9m111[i][3];
			err = m5602_write_sensor(sd,
				init_mt9m111[i][1], data, 2);
		}
	}
	/* Fix: previously a failure in the init sequence was silently
	 * discarded because err was overwritten by the first set_vflip()
	 * call below.  Propagate it instead. */
	if (err < 0)
		return err;

	if (dump_sensor)
		mt9m111_dump_registers(sd);

	/* Restore the cached control values on the hardware */
	err = mt9m111_set_vflip(&sd->gspca_dev, sensor_settings[VFLIP_IDX]);
	if (err < 0)
		return err;

	err = mt9m111_set_hflip(&sd->gspca_dev, sensor_settings[HFLIP_IDX]);
	if (err < 0)
		return err;

	err = mt9m111_set_green_balance(&sd->gspca_dev,
					sensor_settings[GREEN_BALANCE_IDX]);
	if (err < 0)
		return err;

	err = mt9m111_set_blue_balance(&sd->gspca_dev,
				       sensor_settings[BLUE_BALANCE_IDX]);
	if (err < 0)
		return err;

	err = mt9m111_set_red_balance(&sd->gspca_dev,
				      sensor_settings[RED_BALANCE_IDX]);
	if (err < 0)
		return err;

	return mt9m111_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
}
/* Start streaming: run the start register sequence, program the bridge
 * VSYNC/HSYNC window registers for the current mode, and configure the
 * sensor read mode (row/column skip plus the cached flip bits).
 * Returns 0 on success or a negative error code.
 */
int mt9m111_start(struct sd *sd)
{
	int i, err = 0;
	u8 data[2];
	struct cam *cam = &sd->gspca_dev.cam;
	s32 *sensor_settings = sd->sensor_priv;

	/* The bridge is programmed with width - 1; height is used as-is */
	int width = cam->cam_mode[sd->gspca_dev.curr_mode].width - 1;
	int height = cam->cam_mode[sd->gspca_dev.curr_mode].height;

	for (i = 0; i < ARRAY_SIZE(start_mt9m111) && !err; i++) {
		if (start_mt9m111[i][0] == BRIDGE) {
			err = m5602_write_bridge(sd,
				start_mt9m111[i][1],
				start_mt9m111[i][2]);
		} else {
			data[0] = start_mt9m111[i][2];
			data[1] = start_mt9m111[i][3];
			err = m5602_write_sensor(sd,
				start_mt9m111[i][1], data, 2);
		}
	}
	if (err < 0)
		return err;

	/* VSYNC window: height high byte, low byte, then two zero writes */
	err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height >> 8) & 0xff);
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height & 0xff));
	if (err < 0)
		return err;

	for (i = 0; i < 2 && !err; i++)
		err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0);
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 2);
	if (err < 0)
		return err;

	/* HSYNC window: two zero writes, then width high and low bytes */
	for (i = 0; i < 2 && !err; i++)
		err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, 0);
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA,
				 (width >> 8) & 0xff);
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, width & 0xff);
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
	if (err < 0)
		return err;

	/* NOTE(review): width is mode width - 1 (639/319 for the listed
	 * modes), so these 640/320 cases look unreachable for the single
	 * 640x480 mode declared above -- verify against upstream. */
	switch (width) {
	case 640:
		PDEBUG(D_V4L2, "Configuring camera for VGA mode");

		data[0] = MT9M111_RMB_OVER_SIZED;
		/* 2x skip plus cached vflip (bit 0) / hflip (bit 1) */
		data[1] = MT9M111_RMB_ROW_SKIP_2X |
			  MT9M111_RMB_COLUMN_SKIP_2X |
			  (sensor_settings[VFLIP_IDX] << 0) |
			  (sensor_settings[HFLIP_IDX] << 1);

		err = m5602_write_sensor(sd,
					 MT9M111_SC_R_MODE_CONTEXT_B, data, 2);
		break;

	case 320:
		PDEBUG(D_V4L2, "Configuring camera for QVGA mode");

		data[0] = MT9M111_RMB_OVER_SIZED;
		data[1] = MT9M111_RMB_ROW_SKIP_4X |
			  MT9M111_RMB_COLUMN_SKIP_4X |
			  (sensor_settings[VFLIP_IDX] << 0) |
			  (sensor_settings[HFLIP_IDX] << 1);

		err = m5602_write_sensor(sd,
					 MT9M111_SC_R_MODE_CONTEXT_B, data, 2);
		break;
	}
	return err;
}
/* Detach the sensor and release the per-control settings cache
 * allocated in mt9m111_probe(). */
void mt9m111_disconnect(struct sd *sd)
{
	sd->sensor = NULL;

	kfree(sd->sensor_priv);
	/* Clear the stale pointer so any later access fails loudly instead
	 * of becoming a use-after-free / double free. */
	sd->sensor_priv = NULL;
}
/* Report the cached vertical-flip setting; never touches the hardware. */
static int mt9m111_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *settings = sd->sensor_priv;

	*val = settings[VFLIP_IDX];
	PDEBUG(D_V4L2, "Read vertical flip %d", *val);

	return 0;
}
/* Set vertical flip: cache the requested value, then read-modify-write
 * bit 0 of the context-B read-mode register.
 * Returns 0 on success or a negative error code.
 */
static int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
{
	int err;
	u8 data[2] = {0x00, 0x00};
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *sensor_settings = sd->sensor_priv;

	PDEBUG(D_V4L2, "Set vertical flip to %d", val);

	sensor_settings[VFLIP_IDX] = val;

	/* The mt9m111 is flipped by default */
	val = !val;

	/* Set the correct page map (writes 0 -- presumably selecting the
	 * sensor-core page; TODO confirm against the datasheet) */
	err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
	if (err < 0)
		return err;

	err = m5602_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, data, 2);
	if (err < 0)
		return err;

	/* Replace bit 0 (vertical flip) only, keep the rest */
	data[1] = (data[1] & 0xfe) | val;
	err = m5602_write_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
				 data, 2);
	return err;
}
/* Report the cached horizontal-flip setting; never touches the hardware. */
static int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *settings = sd->sensor_priv;

	*val = settings[HFLIP_IDX];
	PDEBUG(D_V4L2, "Read horizontal flip %d", *val);

	return 0;
}
/* Set horizontal flip: cache the requested value, then read-modify-write
 * bit 1 of the context-B read-mode register.
 * Returns 0 on success or a negative error code.
 */
static int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
{
	int err;
	u8 data[2] = {0x00, 0x00};
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *sensor_settings = sd->sensor_priv;

	PDEBUG(D_V4L2, "Set horizontal flip to %d", val);

	sensor_settings[HFLIP_IDX] = val;

	/* The mt9m111 is flipped by default */
	val = !val;

	/* Set the correct page map (writes 0 -- presumably selecting the
	 * sensor-core page; TODO confirm against the datasheet) */
	err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
	if (err < 0)
		return err;

	err = m5602_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, data, 2);
	if (err < 0)
		return err;

	/* Replace bit 1 (horizontal flip) only, keep the rest */
	data[1] = (data[1] & 0xfd) | ((val << 1) & 0x02);
	err = m5602_write_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
				 data, 2);
	return err;
}
/* Report the cached gain value; never touches the hardware. */
static int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *settings = sd->sensor_priv;

	*val = settings[GAIN_IDX];
	PDEBUG(D_V4L2, "Read gain %d", *val);

	return 0;
}
/* Enable/disable auto white balance: read-modify-write bit 1 of the
 * color-pipeline operating mode register and cache the new value.
 * Returns 0 on success or a negative error code.
 */
static int mt9m111_set_auto_white_balance(struct gspca_dev *gspca_dev,
					  __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *sensor_settings = sd->sensor_priv;
	int err;
	u8 data[2];

	err = m5602_read_sensor(sd, MT9M111_CP_OPERATING_MODE_CTL, data, 2);
	if (err < 0)
		return err;

	/* Only bit 0 of the control value is meaningful */
	sensor_settings[AUTO_WHITE_BALANCE_IDX] = val & 0x01;
	/* Replace bit 1 (AWB enable) only, keep the rest */
	data[1] = ((data[1] & 0xfd) | ((val & 0x01) << 1));

	err = m5602_write_sensor(sd, MT9M111_CP_OPERATING_MODE_CTL, data, 2);

	PDEBUG(D_V4L2, "Set auto white balance %d", val);
	return err;
}
/* Report the cached auto-white-balance setting; never touches the hardware. */
static int mt9m111_get_auto_white_balance(struct gspca_dev *gspca_dev,
					  __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *settings = sd->sensor_priv;

	*val = settings[AUTO_WHITE_BALANCE_IDX];
	PDEBUG(D_V4L2, "Read auto white balance %d", *val);

	return 0;
}
/* Set the global gain.  The control value is mapped onto the sensor's
 * gain register format, where the upper bits select an analog multiplier
 * and the low bits carry the scaled base gain.
 * Returns 0 on success, -EINVAL if val is out of range, or a negative
 * error code from the sensor write.
 */
static int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val)
{
	int err, tmp;
	u8 data[2] = {0x00, 0x00};
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *sensor_settings = sd->sensor_priv;

	sensor_settings[GAIN_IDX] = val;

	/* Set the correct page map (writes 0 -- presumably selecting the
	 * sensor-core page; TODO confirm against the datasheet) */
	err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
	if (err < 0)
		return err;

	if (val >= INITIAL_MAX_GAIN * 2 * 2 * 2)
		return -EINVAL;

	/* NOTE(review): the (val << 9) | (val << 8) below looks suspicious --
	 * the two neighbouring ranges set fixed flag bits (1 << 9) | (1 << 8),
	 * so this was probably meant to do the same; verify against the
	 * sensor datasheet before changing. */
	if ((val >= INITIAL_MAX_GAIN * 2 * 2) &&
	    (val < (INITIAL_MAX_GAIN - 1) * 2 * 2 * 2))
		tmp = (1 << 10) | (val << 9) |
		      (val << 8) | (val / 8);
	else if ((val >= INITIAL_MAX_GAIN * 2) &&
		 (val < INITIAL_MAX_GAIN * 2 * 2))
		tmp = (1 << 9) | (1 << 8) | (val / 4);
	else if ((val >= INITIAL_MAX_GAIN) &&
		 (val < INITIAL_MAX_GAIN * 2))
		tmp = (1 << 8) | (val / 2);
	else
		tmp = val;

	/* Register is written big-endian: high byte first */
	data[1] = (tmp & 0xff);
	data[0] = (tmp & 0xff00) >> 8;
	PDEBUG(D_V4L2, "tmp=%d, data[1]=%d, data[0]=%d", tmp,
	       data[1], data[0]);

	err = m5602_write_sensor(sd, MT9M111_SC_GLOBAL_GAIN,
				 data, 2);
	return err;
}
/* Program the green gain: cache the value, then write it (big-endian)
 * to both green gain registers. */
static int mt9m111_set_green_balance(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *settings = sd->sensor_priv;
	u8 buf[2];
	int err;

	settings[GREEN_BALANCE_IDX] = val;
	buf[0] = (val >> 8) & 0xff;
	buf[1] = val & 0xff;

	PDEBUG(D_V4L2, "Set green balance %d", val);

	err = m5602_write_sensor(sd, MT9M111_SC_GREEN_1_GAIN, buf, 2);
	if (err < 0)
		return err;

	return m5602_write_sensor(sd, MT9M111_SC_GREEN_2_GAIN, buf, 2);
}
/* Report the cached green-balance value; never touches the hardware. */
static int mt9m111_get_green_balance(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *settings = sd->sensor_priv;

	*val = settings[GREEN_BALANCE_IDX];
	PDEBUG(D_V4L2, "Read green balance %d", *val);

	return 0;
}
/* Program the blue gain: cache the value, then write it big-endian. */
static int mt9m111_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *settings = sd->sensor_priv;
	u8 buf[2];

	settings[BLUE_BALANCE_IDX] = val;
	buf[0] = (val >> 8) & 0xff;
	buf[1] = val & 0xff;

	PDEBUG(D_V4L2, "Set blue balance %d", val);

	return m5602_write_sensor(sd, MT9M111_SC_BLUE_GAIN, buf, 2);
}
/* Report the cached blue-balance value; never touches the hardware. */
static int mt9m111_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *settings = sd->sensor_priv;

	*val = settings[BLUE_BALANCE_IDX];
	PDEBUG(D_V4L2, "Read blue balance %d", *val);

	return 0;
}
/* Program the red gain: cache the value, then write it big-endian. */
static int mt9m111_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *settings = sd->sensor_priv;
	u8 buf[2];

	settings[RED_BALANCE_IDX] = val;
	buf[0] = (val >> 8) & 0xff;
	buf[1] = val & 0xff;

	PDEBUG(D_V4L2, "Set red balance %d", val);

	return m5602_write_sensor(sd, MT9M111_SC_RED_GAIN, buf, 2);
}
/* Report the cached red-balance value; never touches the hardware. */
static int mt9m111_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *settings = sd->sensor_priv;

	*val = settings[RED_BALANCE_IDX];
	PDEBUG(D_V4L2, "Read red balance %d", *val);

	return 0;
}
/* Debug aid: dump registers 0x00..0xfe from each of the three register
 * pages (sensor core, color pipeline, camera control) via the page-map
 * register.  Read/write errors are deliberately ignored. */
static void mt9m111_dump_registers(struct sd *sd)
{
	u8 address, value[2] = {0x00, 0x00};

	info("Dumping the mt9m111 register state");

	info("Dumping the mt9m111 sensor core registers");
	value[1] = MT9M111_SENSOR_CORE;
	m5602_write_sensor(sd, MT9M111_PAGE_MAP, value, 2);
	for (address = 0; address < 0xff; address++) {
		m5602_read_sensor(sd, address, value, 2);
		info("register 0x%x contains 0x%x%x",
		     address, value[0], value[1]);
	}

	info("Dumping the mt9m111 color pipeline registers");
	value[1] = MT9M111_COLORPIPE;
	m5602_write_sensor(sd, MT9M111_PAGE_MAP, value, 2);
	for (address = 0; address < 0xff; address++) {
		m5602_read_sensor(sd, address, value, 2);
		info("register 0x%x contains 0x%x%x",
		     address, value[0], value[1]);
	}

	info("Dumping the mt9m111 camera control registers");
	value[1] = MT9M111_CAMERA_CONTROL;
	m5602_write_sensor(sd, MT9M111_PAGE_MAP, value, 2);
	for (address = 0; address < 0xff; address++) {
		m5602_read_sensor(sd, address, value, 2);
		info("register 0x%x contains 0x%x%x",
		     address, value[0], value[1]);
	}

	info("mt9m111 register state dump complete");
}
| gpl-2.0 |
Pure4Team/desire820 | drivers/s390/crypto/zcrypt_pcixcc.c | 3903 | 11103 | /*
* zcrypt 2.1.0
*
* Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
#include "zcrypt_error.h"
#include "zcrypt_msgtype6.h"
#include "zcrypt_pcixcc.h"
#include "zcrypt_cca_key.h"
/* Modulus size limits (bytes) and relative speed ratings per card type */
#define PCIXCC_MIN_MOD_SIZE	 16	/* 128 bits	*/
#define PCIXCC_MIN_MOD_SIZE_OLD	 64	/* 512 bits	*/
#define PCIXCC_MAX_MOD_SIZE	256	/* 2048 bits	*/
#define CEX3C_MIN_MOD_SIZE	PCIXCC_MIN_MOD_SIZE
#define CEX3C_MAX_MOD_SIZE	512	/* 4096 bits	*/

#define PCIXCC_MCL2_SPEED_RATING	7870
#define PCIXCC_MCL3_SPEED_RATING	7870
#define CEX2C_SPEED_RATING		7000
#define CEX3C_SPEED_RATING		6500

#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c  /* max size type6 v2 crt message */
#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply	    */

#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)

#define PCIXCC_CLEANUP_TIME	(15*HZ)

/* Round up to the next multiple of 4 */
#define CEIL4(x) ((((x)+3)/4)*4)

/* Per-request completion tracking for synchronous card requests */
struct response_type {
	struct completion work;
	int type;
};
#define PCIXCC_RESPONSE_TYPE_ICA  0
#define PCIXCC_RESPONSE_TYPE_XCRB 1

/* AP device types handled by this driver */
static struct ap_device_id zcrypt_pcixcc_ids[] = {
	{ AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
	{ AP_DEVICE(AP_DEVICE_TYPE_CEX3C) },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);

/* AP bus driver hooks; requests that exceed PCIXCC_CLEANUP_TIME are
 * timed out by the bus. */
static struct ap_driver zcrypt_pcixcc_driver = {
	.probe = zcrypt_pcixcc_probe,
	.remove = zcrypt_pcixcc_remove,
	.ids = zcrypt_pcixcc_ids,
	.request_timeout = PCIXCC_CLEANUP_TIME,
};
/**
 * Micro-code detection function. Its sends a message to a pcixcc card
 * to find out the microcode level.
 * @ap_dev: pointer to the AP device.
 *
 * Returns ZCRYPT_PCIXCC_MCL2 or ZCRYPT_PCIXCC_MCL3 on success, -ENOMEM
 * on allocation failure, or -ENODEV when the card does not answer.
 */
static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
{
	/* Canned test request; the card's reply CPRB return/reason codes
	 * distinguish MCL2 from MCL3 firmware. */
	static unsigned char msg[] = {
	0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,
	0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,
	0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
	0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,
	0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
	0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,
	0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
	0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,
	0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
	0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,
	0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
	0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,
	0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
	0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,
	0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
	0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,
	0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
	0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,
	0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
	0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,
	0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
	0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,
	0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
	0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,
	0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
	0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,
	0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
	0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,
	0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
	0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,
	0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
	0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,
	0xF1,0x3D,0x93,0x53
	};
	unsigned long long psmid;
	struct CPRBX *cprbx;
	char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply)
		return -ENOMEM;

	rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg));
	if (rc)
		goto out_free;

	/* Wait for the test message to complete: poll up to 6 x 300ms */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		rc = ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (rc == 0 && psmid == 0x0102030405060708ULL)
			break;
	}

	if (i >= 6) {
		/* Got no answer. */
		rc = -ENODEV;
		goto out_free;
	}

	/* rtcode 8 / rscode 33 identifies MCL2 firmware */
	cprbx = (struct CPRBX *) (reply + 48);
	if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33)
		rc = ZCRYPT_PCIXCC_MCL2;
	else
		rc = ZCRYPT_PCIXCC_MCL3;
out_free:
	free_page((unsigned long) reply);
	return rc;
}
/**
 * Large random number detection function. Its sends a message to a pcixcc
 * card to find out if large random numbers are supported.
 * @ap_dev: pointer to the AP device.
 *
 * Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
 */
static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
{
	struct ap_message ap_msg;
	unsigned long long psmid;
	struct {
		struct type86_hdr hdr;
		struct type86_fmt2_ext fmt2;
		struct CPRBX cprbx;
	} __attribute__((packed)) *reply;
	int rc, i;

	ap_init_message(&ap_msg);
	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
	if (!ap_msg.message)
		return -ENOMEM;

	/* Build a 4-byte RNG request and send it to the card */
	rng_type6CPRB_msgX(ap_dev, &ap_msg, 4);
	rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message,
		     ap_msg.length);
	if (rc)
		goto out_free;

	/* Wait for the test message to complete: poll for up to 2 seconds */
	for (i = 0; i < 2 * HZ; i++) {
		msleep(1000 / HZ);
		rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096);
		if (rc == 0 && psmid == 0x0102030405060708ULL)
			break;
	}

	if (i >= 2 * HZ) {
		/* Got no answer. */
		rc = -ENODEV;
		goto out_free;
	}

	/* Zero return/reason codes mean the RNG request succeeded */
	reply = ap_msg.message;
	if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
		rc = 1;
	else
		rc = 0;
out_free:
	free_page((unsigned long) ap_msg.message);
	return rc;
}
/**
 * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
 * since the bus_match already checked the hardware type. The PCIXCC
 * cards come in two flavours: micro code level 2 and micro code level 3.
 * This is checked by sending a test message to the device.
 * @ap_dev: pointer to the AP device.
 *
 * Returns 0 on success or a negative error code.
 */
static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
{
	struct zcrypt_device *zdev;
	int rc = 0;

	zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
	if (!zdev)
		return -ENOMEM;
	zdev->ap_dev = ap_dev;
	zdev->online = 1;
	switch (ap_dev->device_type) {
	case AP_DEVICE_TYPE_PCIXCC:
		/* Distinguish MCL2 from MCL3 firmware via a test message */
		rc = zcrypt_pcixcc_mcl(ap_dev);
		if (rc < 0) {
			zcrypt_device_free(zdev);
			return rc;
		}
		zdev->user_space_type = rc;
		if (rc == ZCRYPT_PCIXCC_MCL2) {
			zdev->type_string = "PCIXCC_MCL2";
			zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
			zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
			zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
		} else {
			zdev->type_string = "PCIXCC_MCL3";
			zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
			zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
			zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
		}
		break;
	case AP_DEVICE_TYPE_CEX2C:
		zdev->user_space_type = ZCRYPT_CEX2C;
		zdev->type_string = "CEX2C";
		zdev->speed_rating = CEX2C_SPEED_RATING;
		zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
		zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
		zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
		break;
	case AP_DEVICE_TYPE_CEX3C:
		zdev->user_space_type = ZCRYPT_CEX3C;
		zdev->type_string = "CEX3C";
		zdev->speed_rating = CEX3C_SPEED_RATING;
		zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
		zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
		zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
		break;
	default:
		/* Fix: rc was still 0 here, so an unknown device type made
		 * the probe report success while the device had already
		 * been freed.  Fail the probe explicitly. */
		rc = -ENODEV;
		goto out_free;
	}
	rc = zcrypt_pcixcc_rng_supported(ap_dev);
	if (rc < 0) {
		zcrypt_device_free(zdev);
		return rc;
	}
	if (rc)
		zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
						   MSGTYPE06_VARIANT_DEFAULT);
	else
		zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
						   MSGTYPE06_VARIANT_NORNG);
	ap_dev->reply = &zdev->reply;
	ap_dev->private = zdev;
	rc = zcrypt_device_register(zdev);
	if (rc)
		goto out_free;
	return 0;

 out_free:
	ap_dev->private = NULL;
	/* zdev->ops may still be NULL on the default path -- presumably
	 * zcrypt_msgtype_release() tolerates that; TODO confirm */
	zcrypt_msgtype_release(zdev->ops);
	zcrypt_device_free(zdev);
	return rc;
}
/**
 * This is called to remove the extended PCIXCC/CEX2C driver information
 * if an AP device is removed.  Unregisters the zcrypt device and drops
 * the message type module reference taken in the probe.
 */
static void zcrypt_pcixcc_remove(struct ap_device *ap_dev)
{
	struct zcrypt_device *zdev = ap_dev->private;
	struct zcrypt_ops *zops = zdev->ops;

	/* Save zops before unregister, which presumably may free zdev */
	zcrypt_device_unregister(zdev);
	zcrypt_msgtype_release(zops);
}
/* Module entry point: register this driver with the AP bus */
int __init zcrypt_pcixcc_init(void)
{
	return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc");
}

/* Module exit point: unregister from the AP bus */
void zcrypt_pcixcc_exit(void)
{
	ap_driver_unregister(&zcrypt_pcixcc_driver);
}

module_init(zcrypt_pcixcc_init);
module_exit(zcrypt_pcixcc_exit);
| gpl-2.0 |
gentu/android_kernel_zte_nx503a | net/sctp/auth.c | 3903 | 24507 | /* SCTP kernel implementation
* (C) Copyright 2007 Hewlett-Packard Development Company, L.P.
*
* This file is part of the SCTP kernel implementation
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Vlad Yasevich <vladislav.yasevich@hp.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <net/sctp/sctp.h>
#include <net/sctp/auth.h>
/* HMAC algorithms supported for SCTP-AUTH, indexed by HMAC identifier.
 * SHA-256 is only available when the kernel's SHA-256 crypto support is
 * built in or as a module. */
static struct sctp_hmac sctp_hmac_list[SCTP_AUTH_NUM_HMACS] = {
	{
		/* id 0 is reserved.  as all 0 */
		.hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_0,
	},
	{
		.hmac_id = SCTP_AUTH_HMAC_ID_SHA1,
		.hmac_name="hmac(sha1)",
		.hmac_len = SCTP_SHA1_SIG_SIZE,
	},
	{
		/* id 2 is reserved as well */
		.hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_2,
	},
#if defined (CONFIG_CRYPTO_SHA256) || defined (CONFIG_CRYPTO_SHA256_MODULE)
	{
		.hmac_id = SCTP_AUTH_HMAC_ID_SHA256,
		.hmac_name="hmac(sha256)",
		.hmac_len = SCTP_SHA256_SIG_SIZE,
	}
#endif
};
/* Drop a reference on an auth key; frees it when the count hits zero.
 * NULL keys are ignored. */
void sctp_auth_key_put(struct sctp_auth_bytes *key)
{
	if (!key)
		return;

	if (!atomic_dec_and_test(&key->refcnt))
		return;

	kfree(key);
	SCTP_DBG_OBJCNT_DEC(keys);
}
/* Create a new key structure of a given length */
static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
{
struct sctp_auth_bytes *key;
/* Verify that we are not going to overflow INT_MAX */
if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
return NULL;
/* Allocate the shared key */
key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp);
if (!key)
return NULL;
key->len = key_len;
atomic_set(&key->refcnt, 1);
SCTP_DBG_OBJCNT_INC(keys);
return key;
}
/* Create a new shared key container with a give key id */
struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp)
{
struct sctp_shared_key *new;
/* Allocate the shared key container */
new = kzalloc(sizeof(struct sctp_shared_key), gfp);
if (!new)
return NULL;
INIT_LIST_HEAD(&new->key_list);
new->key_id = key_id;
return new;
}
/* Free the shared key structure */
static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key)
{
BUG_ON(!list_empty(&sh_key->key_list));
sctp_auth_key_put(sh_key->key);
sh_key->key = NULL;
kfree(sh_key);
}
/* Destroy the entire key list. This is done during the
 * associon and endpoint free process.
 */
void sctp_auth_destroy_keys(struct list_head *keys)
{
	struct sctp_shared_key *ep_key;
	struct sctp_shared_key *tmp;

	if (list_empty(keys))
		return;

	/* Safe iteration: each entry is unlinked before being freed */
	key_for_each_safe(ep_key, tmp, keys) {
		list_del_init(&ep_key->key_list);
		sctp_auth_shkey_free(ep_key);
	}
}
/* Compare two byte vectors as numbers. Return values
* are:
* 0 - vectors are equal
* < 0 - vector 1 is smaller than vector2
* > 0 - vector 1 is greater than vector2
*
* Algorithm is:
* This is performed by selecting the numerically smaller key vector...
* If the key vectors are equal as numbers but differ in length ...
* the shorter vector is considered smaller
*
* Examples (with small values):
* 000123456789 > 123456789 (first number is longer)
* 000123456789 < 234567891 (second number is larger numerically)
* 123456789 > 2345678 (first number is both larger & longer)
*/
static int sctp_auth_compare_vectors(struct sctp_auth_bytes *vector1,
struct sctp_auth_bytes *vector2)
{
int diff;
int i;
const __u8 *longer;
diff = vector1->len - vector2->len;
if (diff) {
longer = (diff > 0) ? vector1->data : vector2->data;
/* Check to see if the longer number is
* lead-zero padded. If it is not, it
* is automatically larger numerically.
*/
for (i = 0; i < abs(diff); i++ ) {
if (longer[i] != 0)
return diff;
}
}
/* lengths are the same, compare numbers */
return memcmp(vector1->data, vector2->data, vector1->len);
}
/*
 * Create a key vector as described in SCTP-AUTH, Section 6.1
 *    The RANDOM parameter, the CHUNKS parameter and the HMAC-ALGO
 *    parameter sent by each endpoint are concatenated as byte vectors.
 *    These parameters include the parameter type, parameter length, and
 *    the parameter value, but padding is omitted; all padding MUST be
 *    removed from this concatenation before proceeding with further
 *    computation of keys.  Parameters which were not sent are simply
 *    omitted from the concatenation process.  The resulting two vectors
 *    are called the two key vectors.
 *
 * Returns the newly allocated vector or NULL on allocation failure.
 * The chunks parameter may be NULL and is then left out entirely.
 */
static struct sctp_auth_bytes *sctp_auth_make_key_vector(
			sctp_random_param_t *random,
			sctp_chunks_param_t *chunks,
			sctp_hmac_algo_param_t *hmacs,
			gfp_t gfp)
{
	struct sctp_auth_bytes *new;
	__u32	len;
	__u32	offset = 0;

	/* param_hdr.length covers the header plus value, excluding padding */
	len = ntohs(random->param_hdr.length) + ntohs(hmacs->param_hdr.length);
	if (chunks)
		len += ntohs(chunks->param_hdr.length);

	new = kmalloc(sizeof(struct sctp_auth_bytes) + len, gfp);
	if (!new)
		return NULL;

	new->len = len;

	/* Concatenate: random, then (optional) chunks, then hmacs */
	memcpy(new->data, random, ntohs(random->param_hdr.length));
	offset += ntohs(random->param_hdr.length);

	if (chunks) {
		memcpy(new->data + offset, chunks,
		       ntohs(chunks->param_hdr.length));
		offset += ntohs(chunks->param_hdr.length);
	}

	memcpy(new->data + offset, hmacs, ntohs(hmacs->param_hdr.length));

	return new;
}
/* Make a key vector based on our local parameters */
static struct sctp_auth_bytes *sctp_auth_make_local_vector(
				    const struct sctp_association *asoc,
				    gfp_t gfp)
{
	sctp_random_param_t *random;
	sctp_chunks_param_t *chunks;
	sctp_hmac_algo_param_t *hmacs;

	random = (sctp_random_param_t *)asoc->c.auth_random;
	chunks = (sctp_chunks_param_t *)asoc->c.auth_chunks;
	hmacs = (sctp_hmac_algo_param_t *)asoc->c.auth_hmacs;

	return sctp_auth_make_key_vector(random, chunks, hmacs, gfp);
}
/* Make a key vector based on peer's parameters */
static struct sctp_auth_bytes *sctp_auth_make_peer_vector(
				    const struct sctp_association *asoc,
				    gfp_t gfp)
{
	sctp_random_param_t *random = asoc->peer.peer_random;
	sctp_chunks_param_t *chunks = asoc->peer.peer_chunks;
	sctp_hmac_algo_param_t *hmacs = asoc->peer.peer_hmacs;

	return sctp_auth_make_key_vector(random, chunks, hmacs, gfp);
}
/* Set the value of the association shared key base on the parameters
 * given.  The algorithm is:
 *    From the endpoint pair shared keys and the key vectors the
 *    association shared keys are computed.  This is performed by selecting
 *    the numerically smaller key vector and concatenating it to the
 *    endpoint pair shared key, and then concatenating the numerically
 *    larger key vector to that.  The result of the concatenation is the
 *    association shared key.
 *
 * Layout of the result: [endpoint key (if any)][first_vector][last_vector].
 * Returns the new secret or NULL on allocation failure.
 */
static struct sctp_auth_bytes *sctp_auth_asoc_set_secret(
			struct sctp_shared_key *ep_key,
			struct sctp_auth_bytes *first_vector,
			struct sctp_auth_bytes *last_vector,
			gfp_t gfp)
{
	struct sctp_auth_bytes *secret;
	__u32 offset = 0;
	__u32 auth_len;

	auth_len = first_vector->len + last_vector->len;
	if (ep_key->key)
		auth_len += ep_key->key->len;

	secret = sctp_auth_create_key(auth_len, gfp);
	if (!secret)
		return NULL;

	/* The endpoint pair shared key (when present) comes first */
	if (ep_key->key) {
		memcpy(secret->data, ep_key->key->data, ep_key->key->len);
		offset += ep_key->key->len;
	}

	memcpy(secret->data + offset, first_vector->data, first_vector->len);
	offset += first_vector->len;

	memcpy(secret->data + offset, last_vector->data, last_vector->len);

	return secret;
}
/* Create an association shared key.  Follow the algorithm
 * described in SCTP-AUTH, Section 6.1
 *
 * Builds the local and peer key vectors, orders them numerically and
 * concatenates them onto the endpoint key via sctp_auth_asoc_set_secret().
 * Returns the new secret or NULL on allocation failure.
 */
static struct sctp_auth_bytes *sctp_auth_asoc_create_secret(
				 const struct sctp_association *asoc,
				 struct sctp_shared_key *ep_key,
				 gfp_t gfp)
{
	struct sctp_auth_bytes *local_key_vector;
	struct sctp_auth_bytes *peer_key_vector;
	struct sctp_auth_bytes	*first_vector,
				*last_vector;
	struct sctp_auth_bytes	*secret = NULL;
	int	cmp;


	/* Now we need to build the key vectors
	 * SCTP-AUTH , Section 6.1
	 *    The RANDOM parameter, the CHUNKS parameter and the HMAC-ALGO
	 *    parameter sent by each endpoint are concatenated as byte vectors.
	 *    These parameters include the parameter type, parameter length, and
	 *    the parameter value, but padding is omitted; all padding MUST be
	 *    removed from this concatenation before proceeding with further
	 *    computation of keys.  Parameters which were not sent are simply
	 *    omitted from the concatenation process.  The resulting two vectors
	 *    are called the two key vectors.
	 */
	local_key_vector = sctp_auth_make_local_vector(asoc, gfp);
	peer_key_vector = sctp_auth_make_peer_vector(asoc, gfp);

	if (!peer_key_vector || !local_key_vector)
		goto out;

	/* Figure out the order in which the key_vectors will be
	 * added to the endpoint shared key.
	 * SCTP-AUTH, Section 6.1:
	 *   This is performed by selecting the numerically smaller key
	 *   vector and concatenating it to the endpoint pair shared
	 *   key, and then concatenating the numerically larger key
	 *   vector to that.  If the key vectors are equal as numbers
	 *   but differ in length, then the concatenation order is the
	 *   endpoint shared key, followed by the shorter key vector,
	 *   followed by the longer key vector.  Otherwise, the key
	 *   vectors are identical, and may be concatenated to the
	 *   endpoint pair key in any order.
	 */
	cmp = sctp_auth_compare_vectors(local_key_vector,
					peer_key_vector);
	if (cmp < 0) {
		first_vector = local_key_vector;
		last_vector = peer_key_vector;
	} else {
		first_vector = peer_key_vector;
		last_vector = local_key_vector;
	}

	secret = sctp_auth_asoc_set_secret(ep_key, first_vector, last_vector,
					    gfp);
out:
	/* kfree(NULL) is a no-op, so a partial failure is handled here too */
	kfree(local_key_vector);
	kfree(peer_key_vector);

	return secret;
}
/*
 * Populate the association overlay list with the list
 * from the endpoint.  Each copy shares the underlying key data with the
 * endpoint entry and takes an extra reference on it.
 */
int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep,
			       struct sctp_association *asoc,
			       gfp_t gfp)
{
	struct sctp_shared_key *src;
	struct sctp_shared_key *dst;

	/* The association list must start out empty. */
	BUG_ON(!list_empty(&asoc->endpoint_shared_keys));

	key_for_each(src, &ep->endpoint_shared_keys) {
		dst = sctp_auth_shkey_create(src->key_id, gfp);
		if (!dst)
			goto nomem;

		/* Share (not copy) the key data. */
		dst->key = src->key;
		sctp_auth_key_hold(dst->key);
		list_add(&dst->key_list, &asoc->endpoint_shared_keys);
	}

	return 0;

nomem:
	/* Undo any partial copy. */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
	return -ENOMEM;
}
/* Public interface to create the association shared key.
 * See code above for the algorithm.
 */
int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
{
	struct sctp_shared_key *ep_key;
	struct sctp_auth_bytes *secret;

	/* If we don't support AUTH, or peer is not capable
	 * we don't need to do anything.
	 */
	if (!sctp_auth_enable || !asoc->peer.auth_capable)
		return 0;

	/* If the key_id is non-zero and we couldn't find an
	 * endpoint pair shared key, we can't compute the
	 * secret.
	 * For key_id 0, endpoint pair shared key is a NULL key.
	 */
	ep_key = sctp_auth_get_shkey(asoc, asoc->active_key_id);
	BUG_ON(!ep_key);

	secret = sctp_auth_asoc_create_secret(asoc, ep_key, gfp);
	if (!secret)
		return -ENOMEM;

	/* Swap in the new secret, dropping our reference to the old one. */
	sctp_auth_key_put(asoc->asoc_shared_key);
	asoc->asoc_shared_key = secret;

	return 0;
}
/* Find the endpoint pair shared key based on the key_id.
 * Returns the matching entry from the association's overlay list, or
 * NULL if no key with that id is installed.
 */
struct sctp_shared_key *sctp_auth_get_shkey(
			const struct sctp_association *asoc,
			__u16 key_id)
{
	struct sctp_shared_key *sh_key;

	/* First search associations set of endpoint pair shared keys */
	key_for_each(sh_key, &asoc->endpoint_shared_keys)
		if (sh_key->key_id == key_id)
			return sh_key;

	return NULL;
}
/*
 * Initialize all the possible digest transforms that we can use.  Right
 * now, the supported digests are SHA1 and SHA256.  We do this here once
 * because of the restriction that transforms may only be allocated in
 * user context.  This forces us to pre-allocate all possible transforms
 * at endpoint init time.
 *
 * Returns 0 on success (including when AUTH is disabled or the array is
 * already allocated) or -ENOMEM on any allocation failure.
 */
int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
{
	struct crypto_hash *tfm = NULL;
	__u16 id;

	/* With AUTH disabled there is nothing to allocate. */
	if (!sctp_auth_enable) {
		ep->auth_hmacs = NULL;
		return 0;
	}

	/* If the transforms are already allocated, we are done. */
	if (ep->auth_hmacs)
		return 0;

	/* Allocate the array of pointers to transforms. */
	ep->auth_hmacs = kzalloc(
			    sizeof(struct crypto_hash *) * SCTP_AUTH_NUM_HMACS,
			    gfp);
	if (!ep->auth_hmacs)
		return -ENOMEM;

	for (id = 0; id < SCTP_AUTH_NUM_HMACS; id++) {
		/* See if we support the id.  Supported IDs have name and
		 * length fields set, so that we can allocate and use
		 * them.  We can safely just check for name, for without the
		 * name, we can't allocate the TFM.
		 */
		if (!sctp_hmac_list[id].hmac_name)
			continue;

		/* If this TFM has been allocated, we are all set */
		if (ep->auth_hmacs[id])
			continue;

		/* Allocate the ID */
		tfm = crypto_alloc_hash(sctp_hmac_list[id].hmac_name, 0,
					CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			goto out_err;

		ep->auth_hmacs[id] = tfm;
	}

	return 0;

out_err:
	/* Clean up any successful allocations.  sctp_auth_destroy_hmacs()
	 * kfrees the array itself, so clear the pointer too: otherwise a
	 * later retry would see a stale non-NULL ep->auth_hmacs and return
	 * success while pointing at freed memory.
	 */
	sctp_auth_destroy_hmacs(ep->auth_hmacs);
	ep->auth_hmacs = NULL;
	return -ENOMEM;
}
/* Destroy the hmac tfm array: free every transform that was actually
 * allocated, then the pointer array itself.  A NULL array is a no-op.
 */
void sctp_auth_destroy_hmacs(struct crypto_hash *auth_hmacs[])
{
	int id;

	if (!auth_hmacs)
		return;

	for (id = 0; id < SCTP_AUTH_NUM_HMACS; id++)
		if (auth_hmacs[id])
			crypto_free_hash(auth_hmacs[id]);

	kfree(auth_hmacs);
}
/* Return the HMAC description for @hmac_id.
 *
 * NOTE(review): there is no bounds check here, so callers appear to be
 * responsible for validating hmac_id (<= SCTP_AUTH_HMAC_ID_MAX) before
 * calling -- confirm at every call site.
 */
struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id)
{
	return &sctp_hmac_list[hmac_id];
}
/* Get an hmac description that we can use to build the AUTH chunk.
 * Prefers the cached default id; otherwise returns the first entry in
 * the peer's HMAC-ALGO list that we implement (without caching it), or
 * NULL if none is usable.
 */
struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
{
	struct sctp_hmac_algo_param *hmacs;
	__u16 n_elt;
	__u16 id;
	int i;

	/* If we have a default entry, use it */
	if (asoc->default_hmac_id)
		return &sctp_hmac_list[asoc->default_hmac_id];

	/* No default: scan the peer's advertised identifiers. */
	hmacs = asoc->peer.peer_hmacs;
	if (!hmacs)
		return NULL;

	/* Each identifier is 2 bytes of parameter value. */
	n_elt = (ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t)) >> 1;

	for (i = 0; i < n_elt; i++) {
		id = ntohs(hmacs->hmac_ids[i]);

		/* Skip out-of-range identifiers, and identifiers we have
		 * no implementation for (no name => no transform).
		 */
		if (id > SCTP_AUTH_HMAC_ID_MAX)
			continue;
		if (sctp_hmac_list[id].hmac_name)
			return &sctp_hmac_list[id];
	}

	return NULL;
}
/* Linear search for @hmac_id in an array of @n_elts network-order ids.
 * Returns 1 if present, 0 otherwise.
 */
static int __sctp_auth_find_hmacid(__be16 *hmacs, int n_elts, __be16 hmac_id)
{
	int i;

	for (i = 0; i < n_elts; i++)
		if (hmacs[i] == hmac_id)
			return 1;

	return 0;
}
/* See if the HMAC_ID is one that we claim as supported, i.e. is listed
 * in our local (asoc->c.auth_hmacs) HMAC-ALGO parameter.
 */
int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc,
				  __be16 hmac_id)
{
	struct sctp_hmac_algo_param *hmacs;
	__u16 n_elt;

	if (!asoc)
		return 0;

	hmacs = (struct sctp_hmac_algo_param *)asoc->c.auth_hmacs;
	/* Each identifier occupies 2 bytes of the parameter value. */
	n_elt = (ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t)) >> 1;

	return __sctp_auth_find_hmacid(hmacs->hmac_ids, n_elt, hmac_id);
}
/* Cache the default HMAC id.  This follows this text from SCTP-AUTH,
 * Section 6.1:
 *   The receiver of a HMAC-ALGO parameter SHOULD use the first listed
 *   algorithm it supports.
 */
void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc,
				     struct sctp_hmac_algo_param *hmacs)
{
	struct sctp_endpoint *ep = asoc->ep;
	int n_params;
	int i;
	__u16 id;

	/* if the default id is already set, use it */
	if (asoc->default_hmac_id)
		return;

	/* Each identifier is 2 bytes of parameter value. */
	n_params = (ntohs(hmacs->param_hdr.length)
				- sizeof(sctp_paramhdr_t)) >> 1;

	for (i = 0; i < n_params; i++) {
		id = ntohs(hmacs->hmac_ids[i]);

		/* Skip ids outside the supported range */
		if (id > SCTP_AUTH_HMAC_ID_MAX)
			continue;

		/* Cache the first id for which a transform was allocated */
		if (ep->auth_hmacs[id]) {
			asoc->default_hmac_id = id;
			return;
		}
	}
}
/* Check to see if the given chunk id is listed in the CHUNKS parameter.
 * Returns 1 if listed (and allowed to be), 0 otherwise.
 */
static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param)
{
	unsigned short len;
	int i;

	if (!param || param->param_hdr.length == 0)
		return 0;

	len = ntohs(param->param_hdr.length) - sizeof(sctp_paramhdr_t);

	/* SCTP-AUTH, Section 3.2
	 *    The chunk types for INIT, INIT-ACK, SHUTDOWN-COMPLETE and AUTH
	 *    chunks MUST NOT be listed in the CHUNKS parameter.  However, if
	 *    a CHUNKS parameter is received then the types for INIT, INIT-ACK,
	 *    SHUTDOWN-COMPLETE and AUTH chunks MUST be ignored.
	 */
	for (i = 0; i < len; i++) {
		__u8 cid = param->chunks[i];

		if (cid == SCTP_CID_INIT ||
		    cid == SCTP_CID_INIT_ACK ||
		    cid == SCTP_CID_SHUTDOWN_COMPLETE ||
		    cid == SCTP_CID_AUTH)
			continue;

		if (cid == chunk)
			return 1;
	}

	return 0;
}
/* Check if peer requested that this chunk is authenticated */
int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
{
	if (!asoc || !sctp_auth_enable || !asoc->peer.auth_capable)
		return 0;

	return __sctp_auth_cid(chunk, asoc->peer.peer_chunks);
}
/* Check if we requested that peer authenticate this chunk. */
int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
{
	struct sctp_chunks_param *param;

	if (!asoc || !sctp_auth_enable)
		return 0;

	/* Our own CHUNKS parameter lives in the cookie parameters. */
	param = (struct sctp_chunks_param *)asoc->c.auth_chunks;
	return __sctp_auth_cid(chunk, param);
}
/* SCTP-AUTH: Section 6.2:
 *    The sender MUST calculate the MAC as described in RFC2104 [2] using
 *    the hash function H as described by the MAC Identifier and the shared
 *    association key K based on the endpoint pair shared key described by
 *    the shared key identifier.  The 'data' used for the computation of
 *    the AUTH-chunk is given by the AUTH chunk with its HMAC field set to
 *    zero (as shown in Figure 6) followed by all chunks that are placed
 *    after the AUTH chunk in the SCTP packet.
 *
 * Writes the digest in place into auth->auth_hdr.hmac, covering the AUTH
 * chunk through the end of the skb data.  Returns silently (leaving the
 * HMAC field untouched) if the key cannot be found or the secret cannot
 * be derived.
 *
 * NOTE(review): hmac_id indexes ep->auth_hmacs with no range check here;
 * presumably callers validate it first (e.g. via
 * sctp_auth_asoc_verify_hmac_id) -- confirm.
 */
void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
			      struct sk_buff *skb,
			      struct sctp_auth_chunk *auth,
			      gfp_t gfp)
{
	struct scatterlist sg;
	struct hash_desc desc;
	struct sctp_auth_bytes *asoc_key;
	__u16 key_id, hmac_id;
	__u8 *digest;
	unsigned char *end;
	int free_key = 0;

	/* Extract the info we need:
	 * - hmac id
	 * - key id
	 */
	key_id = ntohs(auth->auth_hdr.shkey_id);
	hmac_id = ntohs(auth->auth_hdr.hmac_id);

	if (key_id == asoc->active_key_id)
		asoc_key = asoc->asoc_shared_key;
	else {
		/* Non-active key id: derive a one-off association secret
		 * from that endpoint pair key and release it when done.
		 */
		struct sctp_shared_key *ep_key;

		ep_key = sctp_auth_get_shkey(asoc, key_id);
		if (!ep_key)
			return;

		asoc_key = sctp_auth_asoc_create_secret(asoc, ep_key, gfp);
		if (!asoc_key)
			return;

		free_key = 1;
	}

	/* set up scatter list over the AUTH chunk plus all following data */
	end = skb_tail_pointer(skb);
	sg_init_one(&sg, auth, end - (unsigned char *)auth);

	desc.tfm = asoc->ep->auth_hmacs[hmac_id];
	desc.flags = 0;

	digest = auth->auth_hdr.hmac;
	if (crypto_hash_setkey(desc.tfm, &asoc_key->data[0], asoc_key->len))
		goto free;

	crypto_hash_digest(&desc, &sg, sg.length, digest);

free:
	if (free_key)
		sctp_auth_key_put(asoc_key);
}
/* API Helpers */

/* Add a chunk to the endpoint authenticated chunk list.
 * Returns 0 on success (including when the chunk is already listed) or
 * -EINVAL when the CHUNKS parameter is already full.
 */
int sctp_auth_ep_add_chunkid(struct sctp_endpoint *ep, __u8 chunk_id)
{
	struct sctp_chunks_param *p = ep->auth_chunk_list;
	__u16 nchunks;
	__u16 param_len;

	/* If this chunk is already specified, we are done */
	if (__sctp_auth_cid(chunk_id, p))
		return 0;

	/* Check if we can add this chunk to the array.  Use >= rather
	 * than == so an inconsistent (oversized) parameter length can
	 * never push the write below past the end of the chunk array.
	 */
	param_len = ntohs(p->param_hdr.length);
	nchunks = param_len - sizeof(sctp_paramhdr_t);
	if (nchunks >= SCTP_NUM_CHUNK_TYPES)
		return -EINVAL;

	p->chunks[nchunks] = chunk_id;
	p->param_hdr.length = htons(param_len + 1);
	return 0;
}
/* Add hmac identifires to the endpoint list of supported hmac ids */
int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
struct sctp_hmacalgo *hmacs)
{
int has_sha1 = 0;
__u16 id;
int i;
/* Scan the list looking for unsupported id. Also make sure that
* SHA1 is specified.
*/
for (i = 0; i < hmacs->shmac_num_idents; i++) {
id = hmacs->shmac_idents[i];
if (id > SCTP_AUTH_HMAC_ID_MAX)
return -EOPNOTSUPP;
if (SCTP_AUTH_HMAC_ID_SHA1 == id)
has_sha1 = 1;
if (!sctp_hmac_list[id].hmac_name)
return -EOPNOTSUPP;
}
if (!has_sha1)
return -EINVAL;
memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0],
hmacs->shmac_num_idents * sizeof(__u16));
ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
hmacs->shmac_num_idents * sizeof(__u16));
return 0;
}
/* Set a new shared key on either endpoint or association.  If a key
 * with the same ID already exists, replace its key data in place;
 * otherwise allocate a new shared-key entry and link it into the list.
 *
 * @ep:       endpoint owning the key list (used when @asoc is NULL)
 * @asoc:     optional association; when set, its overlay list is used
 * @auth_key: caller-supplied key id, length and key material
 *
 * Returns 0 on success or -ENOMEM.
 */
int sctp_auth_set_key(struct sctp_endpoint *ep,
		      struct sctp_association *asoc,
		      struct sctp_authkey *auth_key)
{
	struct sctp_shared_key *cur_key = NULL;
	struct sctp_auth_bytes *key;
	struct list_head *sh_keys;
	int replace = 0;

	/* Try to find the given key id to see if
	 * we are doing a replace, or adding a new key
	 */
	if (asoc)
		sh_keys = &asoc->endpoint_shared_keys;
	else
		sh_keys = &ep->endpoint_shared_keys;

	key_for_each(cur_key, sh_keys) {
		if (cur_key->key_id == auth_key->sca_keynumber) {
			replace = 1;
			break;
		}
	}

	/* If we are not replacing a key id, we need to allocate
	 * a shared key.
	 */
	if (!replace) {
		cur_key = sctp_auth_shkey_create(auth_key->sca_keynumber,
						 GFP_KERNEL);
		if (!cur_key)
			return -ENOMEM;
	}

	/* Create a new key data based on the info passed in */
	key = sctp_auth_create_key(auth_key->sca_keylength, GFP_KERNEL);
	if (!key)
		goto nomem;

	memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylength);

	/* If we are replacing, remove the old keys data from the
	 * key id.  If we are adding new key id, add it to the
	 * list.
	 */
	if (replace)
		sctp_auth_key_put(cur_key->key);
	else
		list_add(&cur_key->key_list, sh_keys);

	cur_key->key = key;
	/* NOTE(review): if sctp_auth_create_key() returns the key with its
	 * refcount already initialized to 1, this extra hold leaks one
	 * reference per call -- confirm against the allocator (later
	 * upstream kernels dropped this hold).
	 */
	sctp_auth_key_hold(key);

	return 0;
nomem:
	/* Only free the entry we allocated here; a pre-existing entry
	 * stays on the list with its old key intact.
	 */
	if (!replace)
		sctp_auth_shkey_free(cur_key);

	return -ENOMEM;
}
/* Make @key_id the active key on the endpoint or association.
 * The key identifier MUST correspond to an existing key; for an
 * association the shared secret is recomputed immediately.
 * Returns 0, -EINVAL (unknown key id) or -ENOMEM.
 */
int sctp_auth_set_active_key(struct sctp_endpoint *ep,
			     struct sctp_association *asoc,
			     __u16 key_id)
{
	struct sctp_shared_key *key;
	struct list_head *sh_keys;
	int found = 0;

	if (asoc)
		sh_keys = &asoc->endpoint_shared_keys;
	else
		sh_keys = &ep->endpoint_shared_keys;

	key_for_each(key, sh_keys) {
		if (key->key_id == key_id) {
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	if (asoc) {
		__u16 prev_key_id = asoc->active_key_id;

		asoc->active_key_id = key_id;
		/* Recompute the association shared key now.  On failure the
		 * callee leaves the old secret in place (it bails out before
		 * touching asoc->asoc_shared_key), so roll the id back and
		 * report the error instead of silently ignoring it.
		 */
		if (sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL)) {
			asoc->active_key_id = prev_key_id;
			return -ENOMEM;
		}
	} else
		ep->active_key_id = key_id;

	return 0;
}
/* Delete the shared key identified by @key_id from the endpoint or the
 * association.  The key identifier MUST NOT be the currently active key
 * and MUST correspond to an existing key; otherwise -EINVAL.
 */
int sctp_auth_del_key_id(struct sctp_endpoint *ep,
			 struct sctp_association *asoc,
			 __u16 key_id)
{
	struct sctp_shared_key *shkey;
	struct list_head *sh_keys;
	int found = 0;

	/* Pick the key list matching the scope and refuse to delete
	 * whatever key is currently active there.
	 */
	if (asoc) {
		if (asoc->active_key_id == key_id)
			return -EINVAL;
		sh_keys = &asoc->endpoint_shared_keys;
	} else {
		if (ep->active_key_id == key_id)
			return -EINVAL;
		sh_keys = &ep->endpoint_shared_keys;
	}

	key_for_each(shkey, sh_keys) {
		if (shkey->key_id == key_id) {
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	/* Unlink and release the shared key */
	list_del_init(&shkey->key_list);
	sctp_auth_shkey_free(shkey);

	return 0;
}
| gpl-2.0 |
yank555-lu/TF101G_V9.2.2.6 | drivers/s390/crypto/zcrypt_mono.c | 4415 | 2270 | /*
* linux/drivers/s390/crypto/zcrypt_mono.c
*
* zcrypt 2.1.0
*
* Copyright (C) 2001, 2006 IBM Corporation
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/compat.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
#include "zcrypt_pcica.h"
#include "zcrypt_pcicc.h"
#include "zcrypt_pcixcc.h"
#include "zcrypt_cex2a.h"
/**
 * The module initialization code.
 *
 * Brings up the AP bus glue, the zcrypt API layer and then each card
 * driver (PCICA, PCICC, PCIXCC, CEX2A) in turn.  Any failure unwinds
 * everything already initialized in strict reverse order via the goto
 * ladder below, and the first error code is returned.
 */
static int __init zcrypt_init(void)
{
	int rc;

	rc = ap_module_init();
	if (rc)
		goto out;
	rc = zcrypt_api_init();
	if (rc)
		goto out_ap;
	rc = zcrypt_pcica_init();
	if (rc)
		goto out_api;
	rc = zcrypt_pcicc_init();
	if (rc)
		goto out_pcica;
	rc = zcrypt_pcixcc_init();
	if (rc)
		goto out_pcicc;
	rc = zcrypt_cex2a_init();
	if (rc)
		goto out_pcixcc;
	return 0;

	/* Error unwind: each label tears down exactly the components
	 * initialized before the failing step, newest first.
	 */
out_pcixcc:
	zcrypt_pcixcc_exit();
out_pcicc:
	zcrypt_pcicc_exit();
out_pcica:
	zcrypt_pcica_exit();
out_api:
	zcrypt_api_exit();
out_ap:
	ap_module_exit();
out:
	return rc;
}
/**
 * The module termination code.
 *
 * Tears the stack down in exact reverse order of zcrypt_init(): card
 * drivers first, then the zcrypt API, then the AP bus glue.
 */
static void __exit zcrypt_exit(void)
{
	zcrypt_cex2a_exit();
	zcrypt_pcixcc_exit();
	zcrypt_pcicc_exit();
	zcrypt_pcica_exit();
	zcrypt_api_exit();
	ap_module_exit();
}

/* Standard module entry/exit hookup. */
module_init(zcrypt_init);
module_exit(zcrypt_exit);
| gpl-2.0 |
donkeykang/kernel_opo_omni | drivers/mtd/chips/cfi_cmdset_0001.c | 4927 | 74088 | /*
* Common Flash Interface support:
* Intel Extended Vendor Command Set (ID 0x0001)
*
* (C) 2000 Red Hat. GPL'd
*
*
* 10/10/2000 Nicolas Pitre <nico@fluxnic.net>
* - completely revamped method functions so they are aware and
* independent of the flash geometry (buswidth, interleave, etc.)
* - scalability vs code size is completely set at compile-time
* (see include/linux/mtd/cfi.h for selection)
* - optimized write buffer method
* 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
* - reworked lock/unlock/erase support for var size flash
* 21/03/2007 Rodolfo Giometti <giometti@linux.it>
* - auto unlock sectors on resume for auto locking flash on power up
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0
/* Intel chips */
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define PF38F4476 0x881c
/* STMicroelectronics chips */
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
#define M50FLW080B 0x0081
/* Atmel chips */
#define AT49BV640D 0x02de
#define AT49BV640DT 0x02db
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
static void cfi_intelext_destroy(struct mtd_info *);
struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
/*
 * *********** SETUP AND PROBE BITS ***********
 */

/* Chip-driver descriptor registered with the map layer.  probe is NULL
 * because this command set is only ever reached through the generic CFI
 * probe, never probed directly.
 */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};
/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
/* Dump the chip's Intel extended-query feature table to the kernel log,
 * one line per feature/status bit.  Compiled in only when
 * DEBUG_CFI_FEATURES is defined; purely informational, no side effects
 * on @extp.
 */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	/* Bits 11+ have no known meaning; report them generically. */
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	/* Voltages are packed BCD-style: high nibble volts, low nibble tenths. */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	/* Translation of Atmel feature flags to Intel FeatureSupport bits. */
	static const struct {
		uint8_t atmel_bit;	/* mask in atmel_pri.Features */
		uint32_t intel_bit;	/* bit in extp->FeatureSupport */
	} feature_map[] = {
		{ 0x01, 1 << 0 },	/* chip erase supported */
		{ 0x02, 1 << 1 },	/* erase suspend supported */
		{ 0x04, 1 << 2 },	/* program suspend supported */
		{ 0x08, 1 << 9 },	/* simultaneous operations supported */
		{ 0x20, 1 << 7 },	/* page mode read supported */
		{ 0x40, 1 << 4 },	/* queued erase supported */
		{ 0x80, 1 << 6 },	/* protection bits supported */
	};
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;
	uint32_t features = 0;
	unsigned int i;

	/* Reverse byteswapping */
	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

	/* Save the Atmel-format table, then wipe everything past the
	 * 5-byte header before rebuilding it in Intel format.
	 */
	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

	for (i = 0; i < sizeof(feature_map) / sizeof(feature_map[0]); i++)
		if (atmel_pri.Features & feature_map[i].atmel_bit)
			features |= feature_map[i].intel_bit;

	extp->FeatureSupport = features;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}
/* AT49BV640D(T) parts power up with their sectors locked: advertise the
 * instant-block-lock feature (bit 5) and mark the device so the
 * power-up unlock handling is applied.
 */
static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	mtd->flags |= MTD_POWERUP_LOCK;
	extp->FeatureSupport |= (1 << 5);
}
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *priv = map->fldrv_priv;
	struct cfi_pri_intelext *extp = priv->cmdset_priv;

	/* Clear bit 0: "program allowed while erase is suspended". */
	extp->SuspendCmdSupport &= ~1;
	printk(KERN_WARNING "cfi_cmdset_0001: Suspend erase on write disabled.\n");
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
/* Forcibly clear the chip's write-suspend capability when the driver is
 * built with CMDSET0001_DISABLE_WRITE_SUSPEND.
 */
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	/* Nothing to do unless the chip advertises write suspend (bit 2). */
	if (!cfip || !(cfip->FeatureSupport & 4))
		return;

	cfip->FeatureSupport &= ~4;
	printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
}
#endif
static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
}
/* Patch the second erase-region descriptor of the ST M28W320CB, forcing
 * its low 16 bits to 0x3e.  (Removed a stray ';' that followed the
 * function's closing brace -- invalid at file scope in C90 and a
 * warning with -Wpedantic.)
 */
static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
/* Linear (directly addressable) maps can support the point/unpoint
 * interface; install our handlers unless something already did.
 */
static void fixup_use_point(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;

	if (map_is_linear(map) && !mtd->_point) {
		mtd->_point = cfi_intelext_point;
		mtd->_unpoint = cfi_intelext_unpoint;
	}
}
/* Switch to buffered writes when the chip advertises a write buffer
 * (indicated by a non-zero CFI buffer-write timeout).
 */
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if (!cfi->cfiq->BufWriteTimeoutTyp)
		return;

	printk(KERN_INFO "Using buffer write method\n" );
	mtd->_write = cfi_intelext_write_buffers;
	mtd->_writev = cfi_intelext_writev;
}
/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	/* Feature bit 5: instant individual block locking. */
	if (!(cfip->FeatureSupport & 32))
		return;

	printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
	mtd->flags |= MTD_POWERUP_LOCK;
}
/* Quirks applied to chips identified via the CFI query tables.
 * cfi_fixup() matches entries on manufacturer and device id;
 * CFI_MFR_ANY/CFI_ID_ANY act as wildcards and the list is terminated by
 * the all-zero sentinel entry.
 */
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ 0, 0, NULL }
};
/* Quirks applied only when the chip was identified via JEDEC probing
 * (CFI_MODE_JEDEC); all current entries install the FWH lock handling.
 */
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is like the devices id's are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	/* Applied unconditionally after the mode-specific tables above. */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
	{ 0, 0, NULL }
};
/* The Numonyx/Intel PF38F4476 reports extended-query version x.3 but is
 * handled here as x.1; rewrite the minor version before
 * read_pri_intelext() validates and parses the table.
 */
static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_intelext *extp)
{
	if (cfi->mfr != CFI_MFR_INTEL || cfi->id != PF38F4476)
		return;

	if (extp->MinorVersion == '3')
		extp->MinorVersion = '1';
}
/* Read and validate the Intel/Sharp extended query (PRI) table at @adr.
 *
 * The table's real size is not known up front: optional protection,
 * burst-read and partition records follow the fixed header.  So the
 * table is read with a guessed size, the variable parts are walked to
 * compute the true size, and if the guess was too small the buffer is
 * freed and the read retried ("goto again") with the larger size.
 *
 * Returns a kmalloc'd table (caller frees) or NULL on read failure,
 * unsupported version, or a table larger than the 4KiB sanity cap.
 */
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp;
	unsigned int extra_size = 0;
	unsigned int extp_size = sizeof(*extp);

again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	cfi_fixup_major_minor(cfi, extp);

	/* Only extended-query versions 1.0 through 1.5 are understood. */
	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MinorVersion >= '0') {
		extra_size = 0;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);
	}

	if (extp->MinorVersion >= '1') {
		/* Burst Read info: 2 fixed bytes, then a length byte that
		 * tells how much more follows.
		 */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size - 1];
	}

	if (extp->MinorVersion >= '3') {
		int nb_parts, i;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		/* One region record per partition, each followed by its
		 * (NumBlockTypes - 1) additional block-info records.
		 */
		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			/* Guessed too small: free and re-read, with a hard
			 * 4KiB cap to avoid looping on garbage data.
			 */
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__func__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
/* Entry point for command set 0x0001 (Intel/Sharp extended).
 *
 * Allocates and populates the mtd_info for @map: installs the method
 * table, reads the extended query table (CFI mode) or applies JEDEC
 * fixups, derives per-chip timeout values from the CFI timeout
 * exponents, and hands off to cfi_intelext_setup().  Returns the new
 * mtd_info, or NULL on allocation/query failure (nothing leaked).
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_intelext_erase_varsize;
	mtd->_read    = cfi_intelext_read;
	mtd->_write   = cfi_intelext_write_words;
	mtd->_sync    = cfi_intelext_sync;
	mtd->_lock    = cfi_intelext_lock;
	mtd->_unlock  = cfi_intelext_unlock;
	mtd->_is_locked = cfi_intelext_is_locked;
	mtd->_suspend = cfi_intelext_suspend;
	mtd->_resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	/* Write-buffer size scales with the interleave of the mapping. */
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	/* CFI encodes typical/maximum operation times as power-of-two
	 * exponents; expand them per chip, with fallbacks when a chip
	 * does not report a value.
	 */
	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		/* Maximum = typical exponent + maximum exponent. */
		if (cfi->cfiq->WordWriteTimeoutTyp &&
		    cfi->cfiq->WordWriteTimeoutMax)
			cfi->chips[i].word_write_time_max =
				1<<(cfi->cfiq->WordWriteTimeoutTyp +
				    cfi->cfiq->WordWriteTimeoutMax);
		else
			cfi->chips[i].word_write_time_max = 50000 * 8;

		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1<<(cfi->cfiq->BufWriteTimeoutTyp +
				    cfi->cfiq->BufWriteTimeoutMax);

		if (cfi->cfiq->BlockEraseTimeoutTyp &&
		    cfi->cfiq->BlockEraseTimeoutMax)
			cfi->chips[i].erase_time_max =
				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
				       cfi->cfiq->BlockEraseTimeoutMax);
		else
			cfi->chips[i].erase_time_max = 2000000 * 8;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
/*
 * Command sets 0x0003 and 0x0200 are handled identically to 0x0001,
 * so export them as aliases of the same probe function.
 */
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
/*
 * Finish constructing the mtd_info begun in cfi_cmdset_0001():
 * compute total device size, build the erase-region table (one entry
 * per region per chip) with per-region lock bitmaps, install the OTP
 * handlers and run the multi-partition fixup.
 *
 * Returns the mtd_info on success.  On failure, everything allocated
 * here (plus cfi->cmdset_priv from the caller) is freed and NULL is
 * returned.
 */
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	/*
	 * kzalloc (not kmalloc) so that the cleanup path below may
	 * unconditionally kfree() every lockmap pointer: entries never
	 * reached are NULL and kfree(NULL) is a no-op.
	 */
	mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo: size in 256-byte units <<8, count-1 in low 16 bits */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			/* One lock bit per erase block, rounded up. */
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
			/*
			 * Fix: this allocation was previously unchecked,
			 * leading to a NULL dereference in the lock/unlock
			 * paths under memory pressure.
			 */
			if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap) {
				printk(KERN_ERR "Failed to allocate memory for MTD erase region lockmap\n");
				goto setup_err;
			}
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i,(unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	/* Fix: release every lockmap allocated above (previously leaked). */
	if (mtd->eraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			kfree(mtd->eraseregions[i].lockmap);
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}
/*
 * Detect hardware-partitioned chips (CFI >= 1.3 with FeatureSupport
 * bit 9 set) and, if found, replace the cfi_private structure with a
 * new one containing one virtual flchip per hardware partition.
 * Walks the vendor extended query table by hand to find the partition
 * region descriptors.
 *
 * On success *pcfi (and map->fldrv_priv) may point to a new structure;
 * the old one is freed.  Returns 0 on success, -EINVAL for a bogus
 * partition count, -ENOMEM on allocation failure.
 */
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatent code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		/* extra[offs+1] holds the length of the burst-read record. */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			/* regioninfo embeds one blockinfo; add the rest. */
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			/* Object-mode programming: no bit-level rewrites. */
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__func__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		/* One shared structure per physical chip for arbitration. */
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			mutex_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				mutex_init(&chip->mutex);
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
/*
* *********** CHIP ACCESS FUNCTIONS ***********
*/
/*
 * Bring a (virtual) chip into a state where the requested operation
 * 'mode' may proceed, suspending an in-progress erase if necessary and
 * allowed.  Called with chip->mutex held; may drop and retake it.
 *
 * Returns 0 when the caller may proceed, -EAGAIN when the caller must
 * re-evaluate from scratch (the lock was dropped), or -EIO on hardware
 * failure / shutdown.
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
		goto sleep;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			/* NOTE: the loop body runs at most once by design —
			 * after one delay we return -EAGAIN so that get_chip()
			 * re-evaluates the whole state from scratch. */
			return -EAGAIN;
		}
		/* Fall through */
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Erase suspend is only attempted when the chip advertises
		 * it (FeatureSupport bit 1) and the requested mode permits
		 * it (write-while-erase also needs SuspendCmdSupport). */
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
			        break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.
				 * Make sure we're in 'read status' mode if it had finished */
				put_chip(map, chip, adr);
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now,so no one can get chip anymore */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* Fall through */
	default:
	sleep:
		/* Chip is busy and cannot be suspended: wait for a wake-up
		 * from whoever owns it, then ask the caller to retry. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}
/*
 * Acquire the right to perform 'mode' on the chip at 'adr'.  For
 * hardware-partitioned chips (chip->priv set), first arbitrate the
 * shared write/erase engine between partitions, possibly suspending
 * the current owner via chip_ready() in its own context.
 *
 * Called with chip->mutex held; may drop and retake it.  Returns 0 on
 * success or a negative error from chip_ready().
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv &&
	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			/* trylock avoids an ABBA deadlock against the
			 * contender taking locks in the opposite order. */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already
			 * in FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender, contender->start);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}
	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
/*
 * Release the chip after an operation: hand the shared write/erase
 * engine back (possibly recursing into the partition it was borrowed
 * from), resume any suspended erase, and wake waiters.
 *
 * Called with chip->mutex held; may temporarily drop it while handing
 * ownership back to another partition.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				/* Recursive put in the loaner's context will
				 * resume its suspended operation. */
				put_chip(map, loaner, loaner->start);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		/* 0xd0 = erase resume, 0x70 = read status register. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP
/*
* No interrupt what so ever can be serviced while the flash isn't in array
* mode. This is ensured by the xip_disable() and xip_enable() functions
* enclosing any code path where the flash is known not to be in array mode.
* And within a XIP disabled code path, only functions marked with __xipram
* may be called and nothing else (it's a good thing to inspect generated
* assembly to make sure inline functions were actually inlined and that gcc
* didn't emit calls to its own support functions). Also configuring MTD CFI
* support to a single buswidth and a single interleave is also recommended.
*/
/*
 * Enter a non-XIP critical section: interrupts must be off while the
 * flash is out of array mode, since any instruction fetch from the
 * flash would return garbage.  The map_read must precede the IRQ
 * disable so the MMU mapping is faulted in first.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}
/*
 * Leave the non-XIP critical section: put the chip back into array
 * mode (0xff = read array) if needed, prime the instruction prefetch,
 * then re-enable interrupts.  Must run from RAM (__xipram) since the
 * flash may not be readable until the 0xff command completes.
 */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
/*
* When a delay is required for the flash operation to complete, the
* xip_wait_for_operation() function is polling for both the given timeout
* and pending (but still masked) hardware interrupts. Whenever there is an
* interrupt pending then the flash erase or write operation is suspended,
* array mode restored and interrupts unmasked. Task scheduling might also
* happen at that point. The CPU eventually returns from the interrupt or
* the call to schedule() and the suspended flash operation is resumed for
* the remaining of the delay period.
*
* Warning: this function _will_ fool interrupt latency tracing tools.
*/
/*
 * Poll for completion of a write/erase while keeping interrupt latency
 * bounded: whenever an interrupt is pending (and the chip supports
 * suspend for the current operation), suspend the operation, return to
 * array mode, service interrupts / schedule, then resume and keep
 * polling for the remaining time.
 *
 * Called with chip->mutex held and IRQs disabled (xip_disable).
 * Returns 0 on completion, -ETIME on timeout, -EIO if the chip refuses
 * to suspend.
 */
static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

       	start = xip_currtime();
	usec = chip_op_time_max;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		/* Suspend only if the chip advertises suspend support for
		 * the operation in progress (FeatureSupport bits 1/2) and,
		 * for interleaved chips, nothing is already suspended. */
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				/* SR.6 clear means the erase had already
				 * completed: nothing was suspended. */
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}
/*
* The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
* the flash is actively programming or erasing since we have to poll for
* the operation to complete anyway. We can't do that in a generic way with
* a XIP setup so do it before the actual flash operation in this case
* and stub it out from INVAL_CACHE_AND_WAIT.
*/
/* XIP build: invalidate the cache up front, then poll via the
 * interrupt-friendly xip_wait_for_operation(). */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
	xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

/* Non-XIP build: the xip hooks compile away and waiting is done by
 * inval_cache_and_wait_for_operation() below. */
#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
/*
 * Non-XIP wait loop: invalidate the cache over the written range (done
 * with the chip lock dropped, since it may take a while), then poll the
 * status register until SR.7 signals completion or the timeout expires.
 * If another thread suspends the operation we sleep until it resumes,
 * and the timeout is restarted after each suspend/resume cycle.
 *
 * Called with chip->mutex held; drops and retakes it while sleeping.
 * Returns 0 on completion, -ETIME on timeout (chip left in FL_STATUS).
 */
static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time, reset_timeo;

	mutex_unlock(&chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	mutex_lock(&chip->mutex);

	timeo = chip_op_time_max;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	/* Busy-wait in chunks of half the typical operation time. */
	sleep_time = chip_op_time / 2;

	for (;;) {
		if (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (chip->erase_suspended && chip_state == FL_ERASING)  {
			/* Erase suspend occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = 0;
		}
		if (chip->write_suspended && chip_state == FL_WRITING)  {
			/* Write suspend occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->write_suspended = 0;
		}

		if (!timeo) {
			/* 0x70 = read status register; report timeout. */
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}
#endif
/* Wait for chip readiness without any cache range to invalidate. */
#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
/*
 * Put one chip into FL_POINT (direct-mapped read) state for the range
 * starting at 'adr'.  A reference count tracks nested points so that
 * cfi_intelext_unpoint() only leaves FL_POINT on the last release.
 * Returns 0 on success or the error from get_chip().
 */
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		/* 0xff = read array mode, required for direct mapping. */
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	mutex_unlock(&chip->mutex);

	return ret;
}
/*
 * mtd _point implementation: pin the chip(s) covering [from, from+len)
 * in read-array mode and return a direct pointer into the mapping.
 * *retlen reports how much was actually pointed; pointing stops at a
 * chip whose mapping is not virtually contiguous with the previous one.
 *
 * NOTE(review): a do_point_onechip() failure only breaks the loop —
 * the function still returns 0 and the caller must compare *retlen
 * against len.  This mirrors the partial-success contract of _point,
 * but confirm against mtd core expectations.
 */
static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, void **virt, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret = 0;

	if (!map->virt)
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*virt = map->virt + cfi->chips[chipnum].start + ofs;
	if (phys)
		*phys = map->phys + cfi->chips[chipnum].start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		/* Clamp this chunk to the end of the current chip. */
		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}
/*
 * mtd _unpoint implementation: drop the FL_POINT reference taken by
 * cfi_intelext_point() on each chip covering [from, from+len), leaving
 * FL_READY once the last reference is gone.  Returns -EINVAL if a chip
 * in the range was not actually pointed.
 */
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum, err = 0;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	while (len && !err) {
		unsigned long thislen;
		struct flchip *chip;

		/* Address taken before the bounds check below — safe,
		 * no dereference happens until after the break. */
		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else {
			printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
			err = -EINVAL;
		}

		put_chip(map, chip, chip->start);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}
/*
 * Read 'len' bytes at chip-relative offset 'adr' into 'buf' from a
 * single chip.  Acquires the chip for FL_READY, switches it into
 * read-array mode if required, copies the data, and releases the chip.
 * Returns 0 on success or the error from get_chip().
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long aligned;
	int err;

	adr += chip->start;

	/* Command accesses must be aligned to the bus width. */
	aligned = adr & ~(map_bankwidth(map) - 1);

	mutex_lock(&chip->mutex);

	err = get_chip(map, chip, aligned, FL_READY);
	if (err) {
		mutex_unlock(&chip->mutex);
		return err;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		/* 0xff puts the chip back into read-array mode. */
		map_write(map, CMD(0xff), aligned);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, aligned);
	mutex_unlock(&chip->mutex);

	return 0;
}
/*
 * mtd _read implementation: split the request at chip boundaries and
 * delegate each piece to do_read_onechip().  *retlen accumulates the
 * bytes actually read; returns 0 or the first per-chip error.
 */
static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int err = 0;
	int chipnum = (from >> cfi->chipshift);
	unsigned long ofs = from - (chipnum << cfi->chipshift);

	for (; len && chipnum < cfi->numchips; chipnum++) {
		unsigned long chunk = len;

		/* Clamp the chunk to the end of the current chip. */
		if ((len + ofs - 1) >> cfi->chipshift)
			chunk = (1 << cfi->chipshift) - ofs;

		err = do_read_onechip(map, &cfi->chips[chipnum], ofs, chunk, buf);
		if (err)
			break;

		*retlen += chunk;
		len -= chunk;
		buf += chunk;
		ofs = 0;
	}
	return err;
}
/*
 * Program a single bus-width word at 'adr'.  'mode' selects the command:
 * FL_WRITING uses 0x40 (or 0x41 on Intel Performance chips), FL_OTP_WRITE
 * uses 0xc0.  Waits for completion and decodes the status-register error
 * bits.  Returns 0, -EROFS (block locked), -EIO (VPP low), -EINVAL
 * (other program error or bad mode), or a timeout error.
 */
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret=0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	/* Command word, then the data word at the same address. */
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time,
				   chip->word_write_time_max);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	/* 0x1a masks SR.1 (lock), SR.3 (VPP) and SR.4 (program) errors. */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
/*
 * mtd _write implementation using single-word programming: handles an
 * unaligned head (padded with 0xff so untouched bytes stay erased),
 * a run of full bus-width words, and an unaligned tail, crossing chip
 * boundaries as needed.  Returns 0 or the first per-word error.
 */
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		/* 0xff padding: programming a 1 bit leaves flash unchanged. */
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
/*
 * Program up to one write buffer (wbufsize bytes) starting at 'adr',
 * gathering data from the kvec array *pvec at offset *pvec_seek (both
 * advanced on return).  Issues Write-to-Buffer (0xe8/0xe9), streams the
 * data padded to bus-width words, confirms with 0xd0, then waits and
 * decodes status-register errors.  Returns 0, -EROFS, -EIO, -EINVAL,
 * or a timeout error.
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
	/* Buffer commands go to the write-buffer-aligned address. */
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		/* 0x50 = clear status register. */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
				map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	/* word_gap: bytes of padding before 'adr' within its bus word. */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	/* The chip expects the word count minus one. */
	map_write(map, CMD(words), cmd_adr );

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time,
				   chip->buffer_write_time_max);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	/* 0x1a masks SR.1 (lock), SR.3 (VPP) and SR.4 (program) errors. */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, cmd_adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
/*
 * Gather-write entry point: write the byte ranges described by the
 * @count-element kvec array @vecs to flash starting at offset @to.
 * Writes are carved into chunks that never cross a write-buffer
 * boundary (wbufsize) and handed to do_write_buffer(), which consumes
 * data from the vectors and advances the shared vecs/vec_seek cursors
 * in place.
 *
 * Returns 0 on success or a negative error from do_write_buffer();
 * *retlen accumulates the number of bytes actually written.
 */
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	/* Total payload across all vectors. */
	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	if (!len)
		return 0;

	/* Locate the target chip and the offset within it. */
	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		/* do_write_buffer() advances vecs/vec_seek as it consumes data */
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		/* Ran off the end of this chip: move to the next one. */
		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();
	} while (len);

	return 0;
}
/*
 * Buffered-write entry point: wrap the flat buffer @buf in a single
 * kvec and delegate to the gather-write implementation.
 */
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec = {
		.iov_base = (void *) buf,
		.iov_len  = len,
	};

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
/*
 * Erase one flash block of size @len at chip-relative address @adr
 * (made absolute below).  Invoked through cfi_varsize_frob().  On an
 * erase failure flagged in SR.5 the operation is retried up to three
 * times.
 *
 * Returns 0 on success, -EROFS for a locked block, -EIO for VPP or
 * hard erase failures, -EINVAL for a command-sequence error, or the
 * error returned by get_chip().
 */
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase: block-erase setup (0x20) followed by confirm (0xD0) */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time,
				   chip->erase_time_max);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			/* SR.4 and SR.5 both set: bogus command sequence */
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			/* Plain erase failure: drop the chip and retry */
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			DISABLE_VPP(map);
			put_chip(map, chip, adr);
			mutex_unlock(&chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
/*
 * MTD erase entry point: erase [instr->addr, instr->addr + instr->len)
 * block by block via cfi_varsize_frob(), then report completion.
 */
static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	int ret = cfi_varsize_frob(mtd, do_erase_oneblock,
				   instr->addr, instr->len, NULL);

	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);
	return 0;
}
/*
 * MTD sync entry point: wait for all chips to become idle by taking
 * each one into FL_SYNCING state (get_chip() blocks until pending
 * operations finish), then release them all again.  If get_chip()
 * fails on chip i, the first loop stops and only chips 0..i-1 are
 * unwound by the second loop.
 */
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */
	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}
/*
 * Read the lock status of the block at chip-relative address @adr:
 * enter Read Identifier mode (0x90) and query the lock configuration
 * word at offset 2 of the block's identifier space.  Returns the raw
 * status value (nonzero means locked).
 * NOTE(review): 'cfi' looks unused but is presumably referenced by the
 * CMD()/cfi_read_query() macro expansions -- verify before removing.
 */
static int __xipram do_getlockstatus_oneblock(struct map_info *map,
					      struct flchip *chip,
					      unsigned long adr,
					      int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}
#ifdef DEBUG_LOCK_BITS
/*
 * Debug helper (DEBUG_LOCK_BITS builds only): dump one block's raw
 * lock status register to the kernel log.
 */
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif
/* Thunk values selecting the operation in do_xxlock_oneblock(). */
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

/*
 * Lock or unlock (selected by @thunk) the block at chip-relative
 * address @adr.  Invoked through cfi_varsize_frob().  Waits for
 * completion unless the part advertises Instant Individual Block
 * Locking.  Returns 0 or a negative error.
 */
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int udelay;
	int ret;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Lock-setup command, then set (0x01) or clear (0xD0) the lock bit */
	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 */
	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		/* NOTE(review): message says "unlock" even on the lock path */
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
/*
 * MTD lock entry point: set the hardware lock bit on every block in
 * [ofs, ofs + len) via cfi_varsize_frob()/do_xxlock_oneblock().
 * Returns 0 or the first per-block error.
 */
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	/* 'len' is a uint64_t: %X would read a 32-bit vararg (undefined
	 * behaviour and garbage output on 64-bit lengths) -- use %llX. */
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llX\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, NULL);
#endif

	return ret;
}
/*
 * MTD unlock entry point: clear the hardware lock bit on every block in
 * [ofs, ofs + len) via cfi_varsize_frob()/do_xxlock_oneblock().
 * Returns 0 or the first per-block error.
 */
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	/* 'len' is a uint64_t: %X would read a 32-bit vararg (undefined
	 * behaviour and garbage output on 64-bit lengths) -- use %llX. */
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llX\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, NULL);
#endif

	return ret;
}
/*
 * MTD is_locked entry point: returns 1 if any block in [ofs, ofs + len)
 * reports a nonzero lock status, 0 otherwise.
 */
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len)
{
	int status = cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
				      ofs, len, NULL);

	return !!status;
}
#ifdef CONFIG_MTD_OTP
typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
u_long data_offset, u_char *buf, u_int size,
u_long prot_offset, u_int groupno, u_int groupsize);
/*
 * Copy @size bytes of OTP (protection register) data at chip-relative
 * @offset into @buf.  The data is visible only in Read Identifier mode
 * (0x90), so the cache is invalidated both before the read (to avoid
 * stale array-mode data) and after (so OTP data never lingers there).
 * The @prot/@grpno/@grpsz arguments are unused here; they exist to fit
 * the otp_op_t callback signature.  Returns 0 or a get_chip() error.
 */
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);
	return 0;
}
/*
 * Program @size bytes from @buf into the OTP area at @offset, one bus
 * word at a time.  Partial words are padded with 0xFF so untouched bits
 * stay unprogrammed (flash bits only program 1 -> 0).  Returns 0 or the
 * first do_write_oneword() error.
 */
static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	while (size) {
		unsigned long word_adr = offset & ~(map_bankwidth(map)-1);
		int pad = offset - word_adr;
		int chunk = min_t(int, size, map_bankwidth(map) - pad);
		map_word datum = map_word_ff(map);
		int err;

		datum = map_word_load_partial(map, datum, buf, pad, chunk);
		err = do_write_oneword(map, chip, word_adr, datum, FL_OTP_WRITE);
		if (err)
			return err;

		offset += chunk;
		buf += chunk;
		size -= chunk;
	}

	return 0;
}
/*
 * Lock one OTP protection group by programming the protection-lock
 * word at @prot with group @grpno's bit cleared (programming a bit to
 * 0 sets the lock).  The requested area must cover exactly one group,
 * otherwise -EXDEV is returned.
 * NOTE(review): 'cfi' looks unused but is presumably referenced by the
 * CMD() macro expansion -- verify before removing.
 */
static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
/*
 * Walk the chip's OTP protection regions, applying @action (read, write
 * or lock) to up to @len bytes starting @from bytes into the factory
 * (user_regs == 0) or user (user_regs == 1) register space.  When
 * @action is NULL, @buf is instead filled with struct otp_info records
 * describing each group.  *retlen accumulates bytes (or record bytes)
 * produced.  Returns 0, -ENODATA if the part has no OTP registers,
 * -ENOSPC if the info buffer is too small, or the action's error.
 */
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == CFI_MFR_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					/*
					 * 'len' is a size_t: the old
					 * "len -= ...; if (len <= 0)" pattern
					 * wrapped around instead of going
					 * negative and never caught the
					 * too-small case, so bounds-check
					 * before subtracting.
					 */
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					    !map_word_bitsset(map, lockword,
							      CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					/* not yet at the requested offset */
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
/* MTD hook: read from the factory-programmed OTP register area. */
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	otp_op_t op = do_otp_read;

	return cfi_intelext_otp_walk(mtd, from, len, retlen, buf, op, 0);
}
/* MTD hook: read from the user-programmable OTP register area. */
static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	otp_op_t op = do_otp_read;

	return cfi_intelext_otp_walk(mtd, from, len, retlen, buf, op, 1);
}
/* MTD hook: program bytes into the user OTP register area. */
static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	otp_op_t op = do_otp_write;

	return cfi_intelext_otp_walk(mtd, from, len, retlen, buf, op, 1);
}
/* MTD hook: permanently lock user OTP groups covering [from, from+len). */
static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t dummy;	/* byte count is not interesting to the caller */

	return cfi_intelext_otp_walk(mtd, from, len, &dummy, NULL,
				     do_otp_lock, 1);
}
/* MTD hook: describe the factory OTP groups as otp_info records in @buf.
 * Returns the number of bytes written, or a negative error. */
static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int err = cfi_intelext_otp_walk(mtd, 0, len, &retlen,
					(u_char *)buf, NULL, 0);

	if (err)
		return err;
	return retlen;
}
/* MTD hook: describe the user OTP groups as otp_info records in @buf.
 * Returns the number of bytes written, or a negative error. */
static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int err = cfi_intelext_otp_walk(mtd, 0, len, &retlen,
					(u_char *)buf, NULL, 1);

	if (err)
		return err;
	return retlen;
}
#endif
/*
 * Snapshot the current hardware lock bit of every block into each
 * erase region's lockmap bitmap so that cfi_intelext_restore_locks()
 * can re-apply it after a suspend/resume cycle.
 */
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	int i;

	for (i = 0; i < mtd->numeraseregions; i++) {
		struct mtd_erase_region_info *rgn = &mtd->eraseregions[i];
		int blk;

		if (!rgn->lockmap)
			continue;

		for (blk = 0; blk < rgn->numblocks; blk++) {
			size_t blklen = rgn->erasesize;
			unsigned long adr = rgn->offset + blk * blklen;
			int locked = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock,
					adr, blklen, NULL);

			if (locked)
				set_bit(blk, rgn->lockmap);
			else
				clear_bit(blk, rgn->lockmap);
		}
	}
}
/*
 * PM suspend: save lock state (on parts that power up fully locked),
 * then put every idle chip into FL_PM_SUSPENDED after resetting it to
 * array mode.  If any chip is busy, already-suspended chips are
 * unwound and -EAGAIN is returned so the suspend can be retried.
 */
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			/* deliberate fall through: FL_PM_SUSPENDED is fine */
		case FL_PM_SUSPENDED:
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */
	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
/*
 * Re-apply the pre-suspend lock state on parts that power up with all
 * blocks locked: every block whose lockmap bit is clear was unlocked
 * before suspend, so unlock it again.
 */
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	int i;

	for (i = 0; i < mtd->numeraseregions; i++) {
		struct mtd_erase_region_info *rgn = &mtd->eraseregions[i];
		int blk;

		if (!rgn->lockmap)
			continue;

		for_each_clear_bit(blk, rgn->lockmap, rgn->numblocks) {
			size_t blklen = rgn->erasesize;

			cfi_intelext_unlock(mtd, rgn->offset + blk * blklen,
					    blklen);
		}
	}
}
/*
 * PM resume: return each suspended chip to array (read) mode -- the
 * part may have been power cycled while we slept -- and, on parts that
 * power up fully locked, restore the saved per-block lock state.
 */
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;

	for (i = 0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), chip->start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}

	if ((mtd->flags & MTD_POWERUP_LOCK) &&
	    extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
/*
 * Force every chip back into array (read) mode, completing any ongoing
 * operation first, so a bootloader stored in flash is readable after a
 * soft reboot.  Chips whose get_chip() fails are simply skipped.
 * Always returns 0.
 */
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}
		mutex_unlock(&chip->mutex);
	}

	return 0;
}
/*
 * Reboot notifier callback: put the flash back into array mode so any
 * bootloader stored on it can execute after the soft reset.
 */
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd = container_of(nb, struct mtd_info,
					    reboot_notifier);

	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
/*
 * Teardown: reset the chips to array mode, drop the reboot notifier,
 * and free all driver-private allocations (cmdset data, CFI query
 * data, per-chip private data, lock bitmaps and erase regions).
 */
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);

	/* kfree(NULL) is a no-op, so no need to test lockmap first */
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);
	}

	kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");
| gpl-2.0 |
crdroid-devices/android_kernel_lge_hammerhead | fs/fifo.c | 4927 | 3277 | /*
* linux/fs/fifo.c
*
* written by Paul H. Hargrove
*
* Fixes:
* 10-06-1999, AV: fixed OOM handling in fifo_open(), moved
* initialization there, switched to external
* allocation of pipe_inode_info.
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/pipe_fs_i.h>
/*
 * Block until the partner-side counter *cnt changes (meaning the other
 * end of the FIFO was opened) or a signal is pending for the caller.
 */
static void wait_for_partner(struct inode* inode, unsigned int *cnt)
{
	int start = *cnt;

	do {
		pipe_wait(inode->i_pipe);
	} while (*cnt == start && !signal_pending(current));
}
static void wake_up_partner(struct inode* inode)
{
wake_up_interruptible(&inode->i_pipe->wait);
}
/*
 * Open one end of a FIFO.  Allocates the pipe_inode_info on first
 * open, installs the read-, write- or rdwr-specific file operations
 * depending on the access mode, and implements the POSIX blocking
 * rules: a blocking reader waits for a writer and vice versa, using
 * the r_counter/w_counter generation counts together with
 * wait_for_partner()/wake_up_partner().  All of this runs under
 * inode->i_mutex.  Returns 0, -ENOMEM, -ENXIO (non-blocking write
 * with no reader), -ERESTARTSYS (interrupted wait) or -EINVAL.
 */
static int fifo_open(struct inode *inode, struct file *filp)
{
	struct pipe_inode_info *pipe;
	int ret;

	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	if (!pipe) {
		/* first open of this FIFO: attach the pipe buffers */
		ret = -ENOMEM;
		pipe = alloc_pipe_info(inode);
		if (!pipe)
			goto err_nocleanup;
		inode->i_pipe = pipe;
	}
	filp->f_version = 0;

	/* We can only do regular read/write on fifos */
	filp->f_mode &= (FMODE_READ | FMODE_WRITE);

	switch (filp->f_mode) {
	case FMODE_READ:
	/*
	 *  O_RDONLY
	 *  POSIX.1 says that O_NONBLOCK means return with the FIFO
	 *  opened, even when there is no process writing the FIFO.
	 */
		filp->f_op = &read_pipefifo_fops;
		pipe->r_counter++;
		if (pipe->readers++ == 0)
			wake_up_partner(inode);

		if (!pipe->writers) {
			if ((filp->f_flags & O_NONBLOCK)) {
				/* suppress POLLHUP until we have
				 * seen a writer */
				filp->f_version = pipe->w_counter;
			} else {
				wait_for_partner(inode, &pipe->w_counter);
				if(signal_pending(current))
					goto err_rd;
			}
		}
		break;

	case FMODE_WRITE:
	/*
	 *  O_WRONLY
	 *  POSIX.1 says that O_NONBLOCK means return -1 with
	 *  errno=ENXIO when there is no process reading the FIFO.
	 */
		ret = -ENXIO;
		if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
			goto err;

		filp->f_op = &write_pipefifo_fops;
		pipe->w_counter++;
		if (!pipe->writers++)
			wake_up_partner(inode);

		if (!pipe->readers) {
			wait_for_partner(inode, &pipe->r_counter);
			if (signal_pending(current))
				goto err_wr;
		}
		break;

	case FMODE_READ | FMODE_WRITE:
	/*
	 *  O_RDWR
	 *  POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
	 *  This implementation will NEVER block on a O_RDWR open, since
	 *  the process can at least talk to itself.
	 */
		filp->f_op = &rdwr_pipefifo_fops;

		pipe->readers++;
		pipe->writers++;
		pipe->r_counter++;
		pipe->w_counter++;
		if (pipe->readers == 1 || pipe->writers == 1)
			wake_up_partner(inode);
		break;

	default:
		/* neither FMODE_READ nor FMODE_WRITE was set */
		ret = -EINVAL;
		goto err;
	}

	/* Ok! */
	mutex_unlock(&inode->i_mutex);
	return 0;

err_rd:
	/* interrupted while waiting for a writer: undo our reader count */
	if (!--pipe->readers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err_wr:
	/* interrupted while waiting for a reader: undo our writer count */
	if (!--pipe->writers)
		wake_up_interruptible(&pipe->wait);
	ret = -ERESTARTSYS;
	goto err;

err:
	/* last one out: release the pipe allocated above */
	if (!pipe->readers && !pipe->writers)
		free_pipe_info(inode);

err_nocleanup:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
/*
* Dummy default file-operations: the only thing this does
* is contain the open that then fills in the correct operations
* depending on the access mode of the file...
*/
/* Default FIFO file operations: fifo_open() replaces filp->f_op with
 * the read-, write- or rdwr-specific pipe fops per the open mode. */
const struct file_operations def_fifo_fops = {
	.open		= fifo_open,	/* will set read_ or write_pipefifo_fops */
	.llseek		= noop_llseek,
};
| gpl-2.0 |
NaokiXie/android_kernel_samsung_wilcox | drivers/isdn/mISDN/dsp_pipeline.c | 7231 | 8519 | /*
* dsp_pipeline.c: pipelined audio processing
*
* Copyright (C) 2007, Nadi Sarrar
*
* Nadi Sarrar <nadi@beronet.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
#include <linux/export.h>
#include "dsp.h"
#include "dsp_hwec.h"
/* uncomment for debugging */
/*#define PIPELINE_DEBUG*/
/* One instantiated element within a DSP pipeline. */
struct dsp_pipeline_entry {
	struct mISDN_dsp_element *elem;
	void *p;			/* per-instance state from elem->new() */
	struct list_head list;		/* linked on dsp_pipeline.list */
};

/* A globally registered pipeline element and its sysfs device. */
struct dsp_element_entry {
	struct mISDN_dsp_element *elem;
	struct device dev;		/* /sys/class/dsp_pipeline/<name> */
	struct list_head list;		/* linked on dsp_elements */
};
static LIST_HEAD(dsp_elements);
/* sysfs */
static struct class *elements_class;
/*
 * sysfs 'args' attribute: list the name, optional default and
 * description of every argument the element accepts.
 * NOTE(review): sprintf() into the sysfs buffer is unbounded here --
 * presumably the combined descriptions fit in PAGE_SIZE; verify.
 */
static ssize_t
attr_show_args(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct mISDN_dsp_element *elem = dev_get_drvdata(dev);
	int i;
	char *p = buf;

	*buf = 0;
	for (i = 0; i < elem->num_args; i++)
		p += sprintf(p, "Name: %s\n%s%s%sDescription: %s\n\n",
			     elem->args[i].name,
			     elem->args[i].def ? "Default: " : "",
			     elem->args[i].def ? elem->args[i].def : "",
			     elem->args[i].def ? "\n" : "",
			     elem->args[i].desc);

	return p - buf;
}
/* Attributes created on each element device (read-only 'args' file). */
static struct device_attribute element_attributes[] = {
	__ATTR(args, 0444, attr_show_args, NULL),
};
/* Device release callback: run by the driver core when the embedded
 * struct device's refcount drops to zero; unlinks and frees the entry. */
static void
mISDN_dsp_dev_release(struct device *dev)
{
	struct dsp_element_entry *entry =
		container_of(dev, struct dsp_element_entry, dev);
	list_del(&entry->list);
	kfree(entry);
}
/*
 * Register a DSP pipeline element: add it to the global dsp_elements
 * list and expose it under /sys/class/dsp_pipeline/<name>.
 * Returns 0 on success or a negative error.
 */
int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
{
	struct dsp_element_entry *entry;
	int ret, i;

	if (!elem)
		return -EINVAL;

	entry = kzalloc(sizeof(struct dsp_element_entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	entry->elem = elem;

	entry->dev.class = elements_class;
	entry->dev.release = mISDN_dsp_dev_release;
	dev_set_drvdata(&entry->dev, elem);
	/* dev_set_name() takes a printf format: never pass elem->name
	 * directly, or a '%' in the name becomes a format directive */
	dev_set_name(&entry->dev, "%s", elem->name);
	ret = device_register(&entry->dev);
	if (ret) {
		printk(KERN_ERR "%s: failed to register %s\n",
		       __func__, elem->name);
		/* NOTE(review): kfree() here leaks the name set by
		 * dev_set_name(); put_device() would be the canonical
		 * cleanup but the release callback does list_del() on a
		 * not-yet-initialized list head -- needs a wider fix. */
		goto err1;
	}
	list_add_tail(&entry->list, &dsp_elements);

	for (i = 0; i < ARRAY_SIZE(element_attributes); ++i) {
		ret = device_create_file(&entry->dev,
					 &element_attributes[i]);
		if (ret) {
			printk(KERN_ERR "%s: failed to create device file\n",
			       __func__);
			goto err2;
		}
	}

#ifdef PIPELINE_DEBUG
	printk(KERN_DEBUG "%s: %s registered\n", __func__, elem->name);
#endif

	return 0;

err2:
	device_unregister(&entry->dev);
	return ret;
err1:
	kfree(entry);
	return ret;
}
EXPORT_SYMBOL(mISDN_dsp_element_register);
/*
 * Unregister a previously registered element: find its entry on the
 * global list and drop its device (the release callback unlinks and
 * frees the entry).  Logs an error if the element was never registered.
 */
void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem)
{
	struct dsp_element_entry *entry, *n;

	if (!elem)
		return;

	list_for_each_entry_safe(entry, n, &dsp_elements, list)
		if (entry->elem == elem) {
			device_unregister(&entry->dev);
#ifdef PIPELINE_DEBUG
			printk(KERN_DEBUG "%s: %s unregistered\n",
			       __func__, elem->name);
#endif
			return;
		}
	printk(KERN_ERR "%s: element %s not in list.\n", __func__, elem->name);
}
EXPORT_SYMBOL(mISDN_dsp_element_unregister);
/* Module init: create the dsp_pipeline sysfs class and bring up the
 * hardware echo-canceller glue.  Returns 0 or the class_create() error. */
int dsp_pipeline_module_init(void)
{
	elements_class = class_create(THIS_MODULE, "dsp_pipeline");
	if (IS_ERR(elements_class))
		return PTR_ERR(elements_class);

#ifdef PIPELINE_DEBUG
	printk(KERN_DEBUG "%s: dsp pipeline module initialized\n", __func__);
#endif

	dsp_hwec_init();

	return 0;
}
/*
 * Module exit: tear down the echo-canceller glue and the sysfs class,
 * then free any elements that were (incorrectly) still registered,
 * warning about each one.
 */
void dsp_pipeline_module_exit(void)
{
	struct dsp_element_entry *entry, *n;

	dsp_hwec_exit();

	class_destroy(elements_class);

	list_for_each_entry_safe(entry, n, &dsp_elements, list) {
		list_del(&entry->list);
		printk(KERN_WARNING "%s: element was still registered: %s\n",
		       __func__, entry->elem->name);
		kfree(entry);
	}

#ifdef PIPELINE_DEBUG
	printk(KERN_DEBUG "%s: dsp pipeline module exited\n", __func__);
#endif
}
/* Prepare an (embedded) pipeline for use: just initialize its element
 * list.  Returns 0, or -EINVAL for a NULL pipeline. */
int dsp_pipeline_init(struct dsp_pipeline *pipeline)
{
	if (!pipeline)
		return -EINVAL;

	INIT_LIST_HEAD(&pipeline->list);

#ifdef PIPELINE_DEBUG
	printk(KERN_DEBUG "%s: dsp pipeline ready\n", __func__);
#endif

	return 0;
}
/*
 * Free every entry of a pipeline.  The hardware echo canceller is a
 * special pseudo-element with no per-instance state: it is disabled
 * instead of freed.
 */
static inline void _dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
{
	struct dsp_pipeline_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &pipeline->list, list) {
		list_del(&entry->list);
		if (entry->elem == dsp_hwec)
			dsp_hwec_disable(container_of(pipeline, struct dsp,
						      pipeline));
		else
			entry->elem->free(entry->p);
		kfree(entry);
	}
}
/* Public teardown wrapper: NULL-safe front end to _dsp_pipeline_destroy(). */
void dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
{
	if (pipeline) {
		_dsp_pipeline_destroy(pipeline);
#ifdef PIPELINE_DEBUG
		printk(KERN_DEBUG "%s: dsp pipeline destroyed\n", __func__);
#endif
	}
}
/*
 * (Re)build a pipeline from a config string of the form
 * "elem1(args)|elem2|elem3(args)".  Any existing pipeline content is
 * destroyed first.  Unknown elements and instantiation failures are
 * logged and skipped (best effort), so the function returns 0 in all
 * cases except -EINVAL for a NULL pipeline.
 */
int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
{
	int len, incomplete = 0, found = 0;
	char *dup, *next, *tok, *name, *args;
	struct dsp_element_entry *entry, *n;
	struct dsp_pipeline_entry *pipeline_entry;
	struct mISDN_dsp_element *elem;

	if (!pipeline)
		return -EINVAL;

	if (!list_empty(&pipeline->list))
		_dsp_pipeline_destroy(pipeline);

	if (!cfg)
		return 0;

	len = strlen(cfg);
	if (!len)
		return 0;

	dup = kmalloc(len + 1, GFP_ATOMIC);
	if (!dup)
		return 0;
	strcpy(dup, cfg);
	/* strsep() advances its cursor and leaves it NULL at the end;
	 * iterate through 'next' so kfree(dup) below frees the original
	 * allocation instead of leaking it. */
	next = dup;
	while ((tok = strsep(&next, "|"))) {
		if (!strlen(tok))
			continue;
		name = strsep(&tok, "(");
		args = strsep(&tok, ")");
		if (args && !*args)
			args = NULL;

		list_for_each_entry_safe(entry, n, &dsp_elements, list)
			if (!strcmp(entry->elem->name, name)) {
				elem = entry->elem;

				pipeline_entry = kmalloc(sizeof(struct
					dsp_pipeline_entry), GFP_ATOMIC);
				if (!pipeline_entry) {
					printk(KERN_ERR "%s: failed to add "
					    "entry to pipeline: %s (out of "
					    "memory)\n", __func__, elem->name);
					incomplete = 1;
					goto _out;
				}
				pipeline_entry->elem = elem;

				if (elem == dsp_hwec) {
					/* This is a hack to make the hwec
					   available as a pipeline module */
					dsp_hwec_enable(container_of(pipeline,
						struct dsp, pipeline), args);
					list_add_tail(&pipeline_entry->list,
						      &pipeline->list);
				} else {
					pipeline_entry->p = elem->new(args);
					if (pipeline_entry->p) {
						list_add_tail(&pipeline_entry->
							list, &pipeline->list);
#ifdef PIPELINE_DEBUG
						printk(KERN_DEBUG "%s: created "
						    "instance of %s%s%s\n",
						    __func__, name, args ?
						    " with args " : "", args ?
						    args : "");
#endif
					} else {
						printk(KERN_ERR "%s: failed "
						    "to add entry to pipeline: "
						    "%s (new() returned NULL)\n",
						    __func__, elem->name);
						kfree(pipeline_entry);
						incomplete = 1;
					}
				}
				found = 1;
				break;
			}

		if (found)
			found = 0;
		else {
			printk(KERN_ERR "%s: element not found, skipping: "
			       "%s\n", __func__, name);
			incomplete = 1;
		}
	}

_out:
	if (!list_empty(&pipeline->list))
		pipeline->inuse = 1;
	else
		pipeline->inuse = 0;

#ifdef PIPELINE_DEBUG
	printk(KERN_DEBUG "%s: dsp pipeline built%s: %s\n",
	       __func__, incomplete ? " incomplete" : "", cfg);
#endif
	kfree(dup);
	return 0;
}
/* Run @len bytes of transmit audio through the pipeline, front to back. */
void dsp_pipeline_process_tx(struct dsp_pipeline *pipeline, u8 *data, int len)
{
	struct dsp_pipeline_entry *entry;

	if (!pipeline)
		return;

	list_for_each_entry(entry, &pipeline->list, list) {
		struct mISDN_dsp_element *elem = entry->elem;

		if (elem->process_tx)
			elem->process_tx(entry->p, data, len);
	}
}
/* Run @len bytes of receive audio through the pipeline in reverse
 * order (mirror image of the transmit direction). */
void dsp_pipeline_process_rx(struct dsp_pipeline *pipeline, u8 *data, int len,
			     unsigned int txlen)
{
	struct dsp_pipeline_entry *entry;

	if (!pipeline)
		return;

	list_for_each_entry_reverse(entry, &pipeline->list, list) {
		struct mISDN_dsp_element *elem = entry->elem;

		if (elem->process_rx)
			elem->process_rx(entry->p, data, len, txlen);
	}
}
| gpl-2.0 |
nvertigo/AK-OnePone | drivers/net/wireless/hostap/hostap_80211_tx.c | 7743 | 16387 | #include <linux/slab.h>
#include <linux/export.h>
#include "hostap_80211.h"
#include "hostap_common.h"
#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"
/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes).
 * Prepended (followed by the original EtherType) when converting an
 * Ethernet frame into an 802.11 data frame in hostap_data_start_xmit(). */
static unsigned char rfc1042_header[] =
	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX);
 * same LLC/SNAP layout but with the Bridge-Tunnel OUI 00:00:f8. */
static unsigned char bridge_tunnel_header[] =
	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* No encapsulation header if EtherType < 0x600 (=length) */
/*
 * Dump the IEEE 802.11 header of an outgoing frame to the kernel log for
 * debugging.  Only as much of the header as the skb actually contains is
 * printed; short frames terminate the dump early.
 */
void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
{
	struct ieee80211_hdr *wh = (struct ieee80211_hdr *) skb->data;
	u16 fctl;

	printk(KERN_DEBUG "%s: TX len=%d jiffies=%ld\n",
	       name, skb->len, jiffies);

	/* need at least the frame_control field */
	if (skb->len < 2)
		return;

	fctl = le16_to_cpu(wh->frame_control);
	printk(KERN_DEBUG " FC=0x%04x (type=%d:%d)%s%s",
	       fctl, (fctl & IEEE80211_FCTL_FTYPE) >> 2,
	       (fctl & IEEE80211_FCTL_STYPE) >> 4,
	       fctl & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
	       fctl & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");

	if (skb->len < IEEE80211_DATA_HDR3_LEN) {
		printk("\n");
		return;
	}

	printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(wh->duration_id),
	       le16_to_cpu(wh->seq_ctrl));

	printk(KERN_DEBUG " A1=%pM", wh->addr1);
	printk(" A2=%pM", wh->addr2);
	printk(" A3=%pM", wh->addr3);
	/* the 4th address is only present in 30-byte (4-addr) headers */
	if (skb->len >= 30)
		printk(" A4=%pM", wh->addr4);
	printk("\n");
}
/* hard_start_xmit function for data interfaces (wlan#, wlan#wds#, wlan#sta)
 * Convert Ethernet header into a suitable IEEE 802.11 header depending on
 * device configuration.
 *
 * Returns NETDEV_TX_OK in all cases; on error the skb is dropped (freed)
 * and tx_dropped is bumped.  On success the re-framed skb is handed to the
 * master radio device via dev_queue_xmit(). */
netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct hostap_interface *iface;
	local_info_t *local;
	int need_headroom, need_tailroom = 0;
	struct ieee80211_hdr hdr;
	u16 fc, ethertype = 0;
	enum {
		WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
	} use_wds = WDS_NO;
	u8 *encaps_data;
	int hdr_len, encaps_len, skip_header_bytes;
	int to_assoc_ap = 0;
	struct hostap_skb_tx_data *meta;

	iface = netdev_priv(dev);
	local = iface->local;

	/* refuse anything shorter than a full Ethernet header; the address
	 * and ethertype extraction below depends on it */
	if (skb->len < ETH_HLEN) {
		printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb "
		       "(len=%d)\n", dev->name, skb->len);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* decide whether 4-address (WDS) framing is needed, based on which
	 * virtual interface the frame was submitted on */
	if (local->ddev != dev) {
		use_wds = (local->iw_mode == IW_MODE_MASTER &&
			   !(local->wds_type & HOSTAP_WDS_STANDARD_FRAME)) ?
			WDS_OWN_FRAME : WDS_COMPLIANT_FRAME;
		if (dev == local->stadev) {
			/* wlan#sta: frame goes to the AP we are associated
			 * with as a client; normal 3-addr ToDS framing */
			to_assoc_ap = 1;
			use_wds = WDS_NO;
		} else if (dev == local->apdev) {
			/* wlan#ap carries raw 802.11 frames only; getting an
			 * Ethernet frame here is a caller error */
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "AP device with Ethernet net dev\n", dev->name);
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	} else {
		if (local->iw_mode == IW_MODE_REPEAT) {
			/* repeater mode traffic must go via a WDS link */
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "non-WDS link in Repeater mode\n", dev->name);
			kfree_skb(skb);
			return NETDEV_TX_OK;
		} else if (local->iw_mode == IW_MODE_INFRA &&
			   (local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
			   memcmp(skb->data + ETH_ALEN, dev->dev_addr,
				  ETH_ALEN) != 0) {
			/* AP client mode: send frames with foreign src addr
			 * using 4-addr WDS frames */
			use_wds = WDS_COMPLIANT_FRAME;
		}
	}

	/* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload
	 * ==>
	 * Prism2 TX frame with 802.11 header:
	 * txdesc (address order depending on used mode; includes dst_addr and
	 * src_addr), possible encapsulation (RFC1042/Bridge-Tunnel;
	 * proto[2], payload {, possible addr4[6]} */

	ethertype = (skb->data[12] << 8) | skb->data[13];

	memset(&hdr, 0, sizeof(hdr));

	/* Length of data after IEEE 802.11 header */
	encaps_data = NULL;
	encaps_len = 0;
	skip_header_bytes = ETH_HLEN;
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
		/* AARP/IPX use the Bridge-Tunnel SNAP OUI (IEEE 802.1H);
		 * the 2-byte EtherType stays in place, hence -2 */
		encaps_data = bridge_tunnel_header;
		encaps_len = sizeof(bridge_tunnel_header);
		skip_header_bytes -= 2;
	} else if (ethertype >= 0x600) {
		/* Ethernet-II frame: standard RFC1042 SNAP encapsulation */
		encaps_data = rfc1042_header;
		encaps_len = sizeof(rfc1042_header);
		skip_header_bytes -= 2;
	}

	fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
	hdr_len = IEEE80211_DATA_HDR3_LEN;

	if (use_wds != WDS_NO) {
		/* Note! Prism2 station firmware has problems with sending real
		 * 802.11 frames with four addresses; until these problems can
		 * be fixed or worked around, 4-addr frames needed for WDS are
		 * using incompatible format: FromDS flag is not set and the
		 * fourth address is added after the frame payload; it is
		 * assumed, that the receiving station knows how to handle this
		 * frame format */

		if (use_wds == WDS_COMPLIANT_FRAME) {
			fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
			/* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
			 * Addr4 = SA */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			hdr_len += ETH_ALEN;
		} else {
			/* bogus 4-addr format to workaround Prism2 station
			 * f/w bug */
			fc |= IEEE80211_FCTL_TODS;
			/* From DS: Addr1 = DA (used as RA),
			 * Addr2 = BSSID (used as TA), Addr3 = SA (used as DA),
			 */

			/* SA from skb->data + ETH_ALEN will be added after
			 * frame payload; use hdr.addr4 as a temporary buffer
			 */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			need_tailroom += ETH_ALEN;
		}

		/* send broadcast and multicast frames to broadcast RA, if
		 * configured; otherwise, use unicast RA of the WDS link */
		if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
		    skb->data[0] & 0x01)
			memset(&hdr.addr1, 0xff, ETH_ALEN);
		else if (iface->type == HOSTAP_INTERFACE_WDS)
			memcpy(&hdr.addr1, iface->u.wds.remote_addr,
			       ETH_ALEN);
		else
			memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
		fc |= IEEE80211_FCTL_FROMDS;
		/* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3,
						 ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
		fc |= IEEE80211_FCTL_TODS;
		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
		memcpy(&hdr.addr1, to_assoc_ap ?
		       local->assoc_ap_addr : local->bssid, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_ADHOC) {
		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
	}

	hdr.frame_control = cpu_to_le16(fc);

	/* drop the consumed part of the Ethernet header; the 802.11 header
	 * and possible SNAP encapsulation are pushed in front below */
	skb_pull(skb, skip_header_bytes);

	need_headroom = local->func->need_tx_headroom + hdr_len + encaps_len;
	if (skb_tailroom(skb) < need_tailroom) {
		/* need room for the trailing addr4 of the bogus 4-addr
		 * format; unshare first so the expand is safe */
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		if (pskb_expand_head(skb, need_headroom, need_tailroom,
				     GFP_ATOMIC)) {
			kfree_skb(skb);
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	} else if (skb_headroom(skb) < need_headroom) {
		struct sk_buff *tmp = skb;
		skb = skb_realloc_headroom(skb, need_headroom);
		kfree_skb(tmp);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	} else {
		/* enough room already, but the header push below writes into
		 * the buffer, so a private copy is still required */
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	}

	if (encaps_data)
		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
	memcpy(skb_push(skb, hdr_len), &hdr, hdr_len);
	if (use_wds == WDS_OWN_FRAME) {
		/* append SA (stashed in hdr.addr4 above) after the payload */
		memcpy(skb_put(skb, ETH_ALEN), &hdr.addr4, ETH_ALEN);
	}

	iface->stats.tx_packets++;
	iface->stats.tx_bytes += skb->len;

	skb_reset_mac_header(skb);
	/* stash per-frame TX metadata in skb->cb for the master device */
	meta = (struct hostap_skb_tx_data *) skb->cb;
	memset(meta, 0, sizeof(*meta));
	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
	if (use_wds)
		meta->flags |= HOSTAP_TX_FLAGS_WDS;
	meta->ethertype = ethertype;
	meta->iface = iface;

	/* Send IEEE 802.11 encapsulated frame using the master radio device */
	skb->dev = local->dev;
	dev_queue_xmit(skb);
	return NETDEV_TX_OK;
}
/* hard_start_xmit function for hostapd wlan#ap interfaces.
 * Frames arriving here already carry a complete IEEE 802.11 header built by
 * hostapd, so no Ethernet conversion is done; the frame is forwarded to the
 * master radio device after recording TX metadata in skb->cb. */
netdev_tx_t hostap_mgmt_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct hostap_interface *iface;
	local_info_t *local;
	struct hostap_skb_tx_data *meta;
	struct ieee80211_hdr *hdr;
	u16 fc;

	iface = netdev_priv(dev);
	local = iface->local;

	/* 10 bytes = frame_control + duration + addr1; anything shorter
	 * cannot be a valid 802.11 frame */
	if (skb->len < 10) {
		printk(KERN_DEBUG "%s: hostap_mgmt_start_xmit: short skb "
		       "(len=%d)\n", dev->name, skb->len);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	iface->stats.tx_packets++;
	iface->stats.tx_bytes += skb->len;

	meta = (struct hostap_skb_tx_data *) skb->cb;
	memset(meta, 0, sizeof(*meta));
	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
	meta->iface = iface;

	/* for plain data frames, recover the ethertype from the LLC/SNAP
	 * encapsulation so the master TX path can apply 802.1X rules */
	if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) {
		hdr = (struct ieee80211_hdr *) skb->data;
		fc = le16_to_cpu(hdr->frame_control);
		if (ieee80211_is_data(hdr->frame_control) &&
		    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DATA) {
			u8 *pos = &skb->data[IEEE80211_DATA_HDR3_LEN +
					     sizeof(rfc1042_header)];
			meta->ethertype = (pos[0] << 8) | pos[1];
		}
	}

	/* Send IEEE 802.11 encapsulated frame using the master radio device */
	skb->dev = local->dev;
	dev_queue_xmit(skb);
	return NETDEV_TX_OK;
}
/* Called only from software IRQ.
 *
 * Encrypt a TX frame in place using the given crypto context.  Returns the
 * (possibly reallocated) skb on success, or NULL on failure — in which case
 * the original skb has been freed.  The caller must not touch the old skb
 * pointer after calling this. */
static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
					  struct lib80211_crypt_data *crypt)
{
	struct hostap_interface *iface;
	local_info_t *local;
	struct ieee80211_hdr *hdr;
	int prefix_len, postfix_len, hdr_len, res;

	iface = netdev_priv(skb->dev);
	local = iface->local;

	/* must contain at least a 3-address data header to encrypt */
	if (skb->len < IEEE80211_DATA_HDR3_LEN) {
		kfree_skb(skb);
		return NULL;
	}

	/* while TKIP countermeasures are active, all TKIP traffic is
	 * dropped per the Michael MIC failure handling rules */
	if (local->tkip_countermeasures &&
	    strcmp(crypt->ops->name, "TKIP") == 0) {
		hdr = (struct ieee80211_hdr *) skb->data;
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
			       "TX packet to %pM\n",
			       local->dev->name, hdr->addr1);
		}
		kfree_skb(skb);
		return NULL;
	}

	/* encryption modifies the buffer, so work on a private copy */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* make room for the cipher's IV/ICV overhead if needed */
	prefix_len = crypt->ops->extra_mpdu_prefix_len +
		crypt->ops->extra_msdu_prefix_len;
	postfix_len = crypt->ops->extra_mpdu_postfix_len +
		crypt->ops->extra_msdu_postfix_len;
	if ((skb_headroom(skb) < prefix_len ||
	     skb_tailroom(skb) < postfix_len) &&
	    pskb_expand_head(skb, prefix_len, postfix_len, GFP_ATOMIC)) {
		kfree_skb(skb);
		return NULL;
	}

	hdr = (struct ieee80211_hdr *) skb->data;
	hdr_len = hostap_80211_get_hdrlen(hdr->frame_control);

	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here. */
	/* hold a reference on the crypt context for the duration of the
	 * encrypt calls so it cannot be freed underneath us */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(skb, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(skb, hdr_len, crypt->priv);
	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}
/* hard_start_xmit function for master radio interface wifi#.
 * AP processing (TX rate control, power save buffering, etc.).
 * Use hardware TX function to send the frame.
 *
 * All frames (data and management, from the per-role virtual interfaces)
 * funnel through here.  The frame is validated, run through the AP
 * station handling (which may buffer, drop, or retry it), optionally
 * encrypted on the host, and finally handed to the low-level TX hook. */
netdev_tx_t hostap_master_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct hostap_interface *iface;
	local_info_t *local;
	netdev_tx_t ret = NETDEV_TX_BUSY;
	u16 fc;
	struct hostap_tx_data tx;
	ap_tx_ret tx_ret;
	struct hostap_skb_tx_data *meta;
	int no_encrypt = 0;
	struct ieee80211_hdr *hdr;

	iface = netdev_priv(dev);
	local = iface->local;

	tx.skb = skb;
	tx.sta_ptr = NULL;

	/* the per-interface xmit functions must have stamped skb->cb;
	 * anything else did not come through a hostap interface */
	meta = (struct hostap_skb_tx_data *) skb->cb;
	if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) {
		printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, "
		       "expected 0x%08x)\n",
		       dev->name, meta->magic, HOSTAP_SKB_TX_DATA_MAGIC);
		ret = NETDEV_TX_OK;
		iface->stats.tx_dropped++;
		goto fail;
	}

	if (local->host_encrypt) {
		/* Set crypt to default algorithm and key; will be replaced in
		 * AP code if STA has own alg/key */
		tx.crypt = local->crypt_info.crypt[local->crypt_info.tx_keyidx];
		tx.host_encrypt = 1;
	} else {
		tx.crypt = NULL;
		tx.host_encrypt = 0;
	}

	/* 24 bytes = minimum full 3-address 802.11 header */
	if (skb->len < 24) {
		printk(KERN_DEBUG "%s: hostap_master_start_xmit: short skb "
		       "(len=%d)\n", dev->name, skb->len);
		ret = NETDEV_TX_OK;
		iface->stats.tx_dropped++;
		goto fail;
	}

	/* FIX (?):
	 * Wi-Fi 802.11b test plan suggests that AP should ignore power save
	 * bit in authentication and (re)association frames and assume tha
	 * STA remains awake for the response. */
	tx_ret = hostap_handle_sta_tx(local, &tx);
	/* the AP code may have replaced the skb (e.g. when buffering), so
	 * re-read the pointers after the call */
	skb = tx.skb;
	meta = (struct hostap_skb_tx_data *) skb->cb;
	hdr = (struct ieee80211_hdr *) skb->data;
	fc = le16_to_cpu(hdr->frame_control);
	switch (tx_ret) {
	case AP_TX_CONTINUE:
		break;
	case AP_TX_CONTINUE_NOT_AUTHORIZED:
		/* 802.1X port is closed for this STA: only EAPOL (and WDS)
		 * frames may pass until authorization completes */
		if (local->ieee_802_1x &&
		    ieee80211_is_data(hdr->frame_control) &&
		    meta->ethertype != ETH_P_PAE &&
		    !(meta->flags & HOSTAP_TX_FLAGS_WDS)) {
			printk(KERN_DEBUG "%s: dropped frame to unauthorized "
			       "port (IEEE 802.1X): ethertype=0x%04x\n",
			       dev->name, meta->ethertype);
			hostap_dump_tx_80211(dev->name, skb);

			ret = NETDEV_TX_OK; /* drop packet */
			iface->stats.tx_dropped++;
			goto fail;
		}
		break;
	case AP_TX_DROP:
		ret = NETDEV_TX_OK; /* drop packet */
		iface->stats.tx_dropped++;
		goto fail;
	case AP_TX_RETRY:
		goto fail;
	case AP_TX_BUFFERED:
		/* do not free skb here, it will be freed when the
		 * buffered frame is sent/timed out */
		ret = NETDEV_TX_OK;
		goto tx_exit;
	}

	/* Request TX callback if protocol version is 2 in 802.11 header;
	 * this version 2 is a special case used between hostapd and kernel
	 * driver */
	if (((fc & IEEE80211_FCTL_VERS) == BIT(1)) &&
	    local->ap && local->ap->tx_callback_idx && meta->tx_cb_idx == 0) {
		meta->tx_cb_idx = local->ap->tx_callback_idx;

		/* remove special version from the frame header */
		fc &= ~IEEE80211_FCTL_VERS;
		hdr->frame_control = cpu_to_le16(fc);
	}

	/* only data frames are ever encrypted */
	if (!ieee80211_is_data(hdr->frame_control)) {
		no_encrypt = 1;
		tx.crypt = NULL;
	}

	if (local->ieee_802_1x && meta->ethertype == ETH_P_PAE && tx.crypt &&
	    !(fc & IEEE80211_FCTL_PROTECTED)) {
		no_encrypt = 1;
		PDEBUG(DEBUG_EXTRA2, "%s: TX: IEEE 802.1X - passing "
		       "unencrypted EAPOL frame\n", dev->name);
		tx.crypt = NULL; /* no encryption for IEEE 802.1X frames */
	}

	if (tx.crypt && (!tx.crypt->ops || !tx.crypt->ops->encrypt_mpdu))
		tx.crypt = NULL;
	else if ((tx.crypt ||
		  local->crypt_info.crypt[local->crypt_info.tx_keyidx]) &&
		 !no_encrypt) {
		/* Add ISWEP flag both for firmware and host based encryption
		 */
		fc |= IEEE80211_FCTL_PROTECTED;
		hdr->frame_control = cpu_to_le16(fc);
	} else if (local->drop_unencrypted &&
		   ieee80211_is_data(hdr->frame_control) &&
		   meta->ethertype != ETH_P_PAE) {
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: dropped unencrypted TX data "
			       "frame (drop_unencrypted=1)\n", dev->name);
		}
		iface->stats.tx_dropped++;
		ret = NETDEV_TX_OK;
		goto fail;
	}

	if (tx.crypt) {
		/* hostap_tx_encrypt() frees the skb on failure and may
		 * reallocate it on success */
		skb = hostap_tx_encrypt(skb, tx.crypt);
		if (skb == NULL) {
			printk(KERN_DEBUG "%s: TX - encryption failed\n",
			       dev->name);
			ret = NETDEV_TX_OK;
			goto fail;
		}
		meta = (struct hostap_skb_tx_data *) skb->cb;
		if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) {
			printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, "
			       "expected 0x%08x) after hostap_tx_encrypt\n",
			       dev->name, meta->magic,
			       HOSTAP_SKB_TX_DATA_MAGIC);
			ret = NETDEV_TX_OK;
			iface->stats.tx_dropped++;
			goto fail;
		}
	}

	/* hand the frame to the low-level hardware TX function; the driver
	 * takes a copy, so the skb is freed below in either case */
	if (local->func->tx == NULL || local->func->tx(skb, dev)) {
		ret = NETDEV_TX_OK;
		iface->stats.tx_dropped++;
	} else {
		ret = NETDEV_TX_OK;
		iface->stats.tx_packets++;
		iface->stats.tx_bytes += skb->len;
	}

 fail:
	if (ret == NETDEV_TX_OK && skb)
		dev_kfree_skb(skb);
 tx_exit:
	if (tx.sta_ptr)
		hostap_handle_sta_release(tx.sta_ptr);
	return ret;
}
EXPORT_SYMBOL(hostap_master_start_xmit);
| gpl-2.0 |
RichardWithnell/net-next-sim | drivers/hwmon/pmbus/ucd9200.c | 9535 | 5207 | /*
* Hardware monitoring driver for ucd9200 series Digital PWM System Controllers
*
* Copyright (C) 2011 Ericsson AB.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c/pmbus.h>
#include "pmbus.h"
/* UCD9200-specific (non-standard PMBus) command codes */
#define UCD9200_PHASE_INFO	0xd2	/* block read: per-rail phase config */
#define UCD9200_DEVICE_ID	0xfd	/* block read: ASCII device ID string */

/* supported chip variants; ucd9200 acts as the generic catch-all */
enum chips { ucd9200, ucd9220, ucd9222, ucd9224, ucd9240, ucd9244, ucd9246,
	     ucd9248 };

static const struct i2c_device_id ucd9200_id[] = {
	{"ucd9200", ucd9200},
	{"ucd9220", ucd9220},
	{"ucd9222", ucd9222},
	{"ucd9224", ucd9224},
	{"ucd9240", ucd9240},
	{"ucd9244", ucd9244},
	{"ucd9246", ucd9246},
	{}
};
MODULE_DEVICE_TABLE(i2c, ucd9200_id);
/*
 * Probe a UCD92xx controller: identify the chip from its DEVICE_ID string,
 * count the configured rails via PHASE_INFO, initialize the PHASE registers
 * so per-phase commands cover all phases of a rail, and register with the
 * PMBus core.  Returns 0 on success or a negative errno.
 */
static int ucd9200_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
	struct pmbus_driver_info *info;
	const struct i2c_device_id *mid;
	int i, j, ret;

	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_BYTE_DATA |
				     I2C_FUNC_SMBUS_BLOCK_DATA))
		return -ENODEV;

	ret = i2c_smbus_read_block_data(client, UCD9200_DEVICE_ID,
					block_buffer);
	if (ret < 0) {
		dev_err(&client->dev, "Failed to read device ID\n");
		return ret;
	}
	/* the returned block is not NUL terminated; terminate it at the
	 * reported length before treating it as a string */
	block_buffer[ret] = '\0';
	dev_info(&client->dev, "Device ID %s\n", block_buffer);

	/* match the reported device ID prefix against the known chip names */
	for (mid = ucd9200_id; mid->name[0]; mid++) {
		if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
			break;
	}
	if (!mid->name[0]) {
		dev_err(&client->dev, "Unsupported device\n");
		return -ENODEV;
	}

	/* warn (but continue) if device tree / platform data named a
	 * different variant than the chip reports */
	if (id->driver_data != ucd9200 && id->driver_data != mid->driver_data)
		dev_notice(&client->dev,
			   "Device mismatch: Configured %s, detected %s\n",
			   id->name, mid->name);

	info = devm_kzalloc(&client->dev, sizeof(struct pmbus_driver_info),
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	ret = i2c_smbus_read_block_data(client, UCD9200_PHASE_INFO,
					block_buffer);
	if (ret < 0) {
		dev_err(&client->dev, "Failed to read phase information\n");
		return ret;
	}

	/*
	 * Calculate number of configured pages (rails) from PHASE_INFO
	 * register.
	 * Rails have to be sequential, so we can abort after finding
	 * the first unconfigured rail.
	 */
	info->pages = 0;
	for (i = 0; i < ret; i++) {
		if (!block_buffer[i])
			break;
		info->pages++;
	}
	if (!info->pages) {
		dev_err(&client->dev, "No rails configured\n");
		return -ENODEV;
	}
	dev_info(&client->dev, "%d rails configured\n", info->pages);

	/*
	 * Set PHASE registers on all pages to 0xff to ensure that phase
	 * specific commands will apply to all phases of a given page (rail).
	 * This only affects the READ_IOUT and READ_TEMPERATURE2 registers.
	 * READ_IOUT will return the sum of currents of all phases of a rail,
	 * and READ_TEMPERATURE2 will return the maximum temperature detected
	 * for the phases of the rail.
	 */
	for (i = 0; i < info->pages; i++) {
		/*
		 * Setting PAGE & PHASE fails once in a while for no obvious
		 * reason, so we need to retry a couple of times.
		 */
		for (j = 0; j < 3; j++) {
			ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
			if (ret < 0)
				continue;
			ret = i2c_smbus_write_byte_data(client, PMBUS_PHASE,
							0xff);
			if (ret < 0)
				continue;
			break;
		}
		if (ret < 0) {
			dev_err(&client->dev,
				"Failed to initialize PHASE registers\n");
			return ret;
		}
	}
	/* leave the chip pointing at page 0 for the PMBus core */
	if (info->pages > 1)
		i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);

	/* page 0 carries the input-side sensors; all rails report
	 * output voltage/current/power and phase temperature */
	info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT |
		PMBUS_HAVE_IIN | PMBUS_HAVE_PIN |
		PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
		PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
		PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP |
		PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;

	for (i = 1; i < info->pages; i++)
		info->func[i] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
			PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
			PMBUS_HAVE_POUT |
			PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;

	/* ucd9240 supports a single fan */
	if (mid->driver_data == ucd9240)
		info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12;

	return pmbus_do_probe(client, mid, info);
}
/* This is the driver that will be inserted */
static struct i2c_driver ucd9200_driver = {
	.driver = {
		.name = "ucd9200",
	},
	.probe = ucd9200_probe,
	.remove = pmbus_do_remove,	/* generic PMBus teardown */
	.id_table = ucd9200_id,
};

/* expands to module init/exit that register/unregister the I2C driver */
module_i2c_driver(ucd9200_driver);

MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for TI UCD922x, UCD924x");
MODULE_LICENSE("GPL");
| gpl-2.0 |
UnofficialAOKPLWW/android_kernel_semc_msm7x30 | fs/fscache/cookie.c | 9791 | 13661 | /* netfs cookie management
*
* Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* See Documentation/filesystems/caching/netfs-api.txt for more information on
* the netfs API.
*/
#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"
/* slab cache from which all fscache cookies are allocated */
struct kmem_cache *fscache_cookie_jar;

/* monotonically increasing ID assigned to each new backing object for
 * debug output */
static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);

static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
static int fscache_alloc_object(struct fscache_cache *cache,
				struct fscache_cookie *cookie);
static int fscache_attach_object(struct fscache_cookie *cookie,
				 struct fscache_object *object);
/*
 * Slab constructor: bring a freshly allocated cookie-jar element into a
 * known-good state before it is ever handed out.
 */
void fscache_cookie_init_once(void *data)
{
	struct fscache_cookie *c = data;

	memset(c, 0, sizeof(*c));
	spin_lock_init(&c->lock);
	spin_lock_init(&c->stores_lock);
	INIT_HLIST_HEAD(&c->backing_objects);
}
/*
 * request a cookie to represent an object (index, datafile, xattr, etc)
 * - parent specifies the parent object
 * - the top level index cookie for each netfs is stored in the fscache_netfs
 *   struct upon registration
 * - def points to the definition
 * - the netfs_data will be passed to the functions pointed to in *def
 * - all attached caches will be searched to see if they contain this object
 * - index objects aren't stored on disk until there's a dependent file that
 *   needs storing
 * - other objects are stored in a selected cache immediately, and all the
 *   indices forming the path to it are instantiated if necessary
 * - we never let on to the netfs about errors
 *   - we may set a negative cookie pointer, but that's okay
 *
 * Returns the new cookie (holding one usage reference plus a reference on
 * the parent), or NULL on any failure.
 */
struct fscache_cookie *__fscache_acquire_cookie(
	struct fscache_cookie *parent,
	const struct fscache_cookie_def *def,
	void *netfs_data)
{
	struct fscache_cookie *cookie;

	BUG_ON(!def);

	_enter("{%s},{%s},%p",
	       parent ? (char *) parent->def->name : "<no-parent>",
	       def->name, netfs_data);

	fscache_stat(&fscache_n_acquires);

	/* if there's no parent cookie, then we don't create one here either */
	if (!parent) {
		fscache_stat(&fscache_n_acquires_null);
		_leave(" [no parent]");
		return NULL;
	}

	/* validate the definition */
	BUG_ON(!def->get_key);
	BUG_ON(!def->name[0]);

	/* a non-index parent may only have index children */
	BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX &&
	       parent->def->type != FSCACHE_COOKIE_TYPE_INDEX);

	/* allocate and initialise a cookie */
	cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
	if (!cookie) {
		fscache_stat(&fscache_n_acquires_oom);
		_leave(" [ENOMEM]");
		return NULL;
	}

	atomic_set(&cookie->usage, 1);
	atomic_set(&cookie->n_children, 0);

	/* pin the parent: it may not go away while it has children */
	atomic_inc(&parent->usage);
	atomic_inc(&parent->n_children);

	cookie->def		= def;
	cookie->parent		= parent;
	cookie->netfs_data	= netfs_data;
	cookie->flags		= 0;

	/* radix tree insertion won't use the preallocation pool unless it's
	 * told it may not wait */
	INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);

	switch (cookie->def->type) {
	case FSCACHE_COOKIE_TYPE_INDEX:
		fscache_stat(&fscache_n_cookie_index);
		break;
	case FSCACHE_COOKIE_TYPE_DATAFILE:
		fscache_stat(&fscache_n_cookie_data);
		break;
	default:
		fscache_stat(&fscache_n_cookie_special);
		break;
	}

	/* if the object is an index then we need do nothing more here - we
	 * create indices on disk when we need them as an index may exist in
	 * multiple caches */
	if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
		if (fscache_acquire_non_index_cookie(cookie) < 0) {
			/* undo the child accounting; __fscache_cookie_put()
			 * drops the parent usage reference */
			atomic_dec(&parent->n_children);
			__fscache_cookie_put(cookie);
			fscache_stat(&fscache_n_acquires_nobufs);
			_leave(" = NULL");
			return NULL;
		}
	}

	fscache_stat(&fscache_n_acquires_ok);
	_leave(" = %p", cookie);
	return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
/*
 * acquire a non-index cookie
 * - this must make sure the index chain is instantiated and instantiate the
 *   object representation too
 *
 * Returns 0 on success (including the benign "no caches registered" case),
 * -ENOMEDIUM if no cache will take the object, -ENOBUFS if lookup failed,
 * or a negative error from object allocation.
 */
static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
{
	struct fscache_object *object;
	struct fscache_cache *cache;
	uint64_t i_size;
	int ret;

	_enter("");

	cookie->flags = 1 << FSCACHE_COOKIE_UNAVAILABLE;

	/* now we need to see whether the backing objects for this cookie yet
	 * exist, if not there'll be nothing to search */
	down_read(&fscache_addremove_sem);

	if (list_empty(&fscache_cache_list)) {
		up_read(&fscache_addremove_sem);
		_leave(" = 0 [no caches]");
		return 0;
	}

	/* select a cache in which to store the object */
	cache = fscache_select_cache_for_object(cookie->parent);
	if (!cache) {
		up_read(&fscache_addremove_sem);
		fscache_stat(&fscache_n_acquires_no_cache);
		_leave(" = -ENOMEDIUM [no cache]");
		return -ENOMEDIUM;
	}

	_debug("cache %s", cache->tag->name);

	/* mark the cookie as mid-instantiation; LOOKING_UP is cleared when
	 * lookup completes (or UNAVAILABLE is set on failure) */
	cookie->flags =
		(1 << FSCACHE_COOKIE_LOOKING_UP) |
		(1 << FSCACHE_COOKIE_CREATING) |
		(1 << FSCACHE_COOKIE_NO_DATA_YET);

	/* ask the cache to allocate objects for this cookie and its parent
	 * chain */
	ret = fscache_alloc_object(cache, cookie);
	if (ret < 0) {
		up_read(&fscache_addremove_sem);
		_leave(" = %d", ret);
		return ret;
	}

	/* pass on how big the object we're caching is supposed to be */
	cookie->def->get_attr(cookie->netfs_data, &i_size);

	spin_lock(&cookie->lock);
	if (hlist_empty(&cookie->backing_objects)) {
		spin_unlock(&cookie->lock);
		goto unavailable;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	fscache_set_store_limit(object, i_size);

	/* initiate the process of looking up all the objects in the chain
	 * (done by fscache_initialise_object()) */
	fscache_enqueue_object(object);

	spin_unlock(&cookie->lock);

	/* we may be required to wait for lookup to complete at this point */
	if (!fscache_defer_lookup) {
		_debug("non-deferred lookup %p", &cookie->flags);
		wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
		_debug("complete");
		if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
			goto unavailable;
	}

	up_read(&fscache_addremove_sem);
	_leave(" = 0 [deferred]");
	return 0;

unavailable:
	up_read(&fscache_addremove_sem);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
/*
 * recursively allocate cache object records for a cookie/cache combination
 * - caller must be holding the addremove sem
 *
 * Allocates an object for this cookie and, via recursion, for every
 * ancestor cookie that doesn't already have one in this cache.  Returns 0
 * on success (including when an object already exists), -ENOBUFS if the
 * existing object is dying, or a negative error from the cache backend.
 */
static int fscache_alloc_object(struct fscache_cache *cache,
				struct fscache_cookie *cookie)
{
	struct fscache_object *object;
	struct hlist_node *_n;
	int ret;

	_enter("%p,%p{%s}", cache, cookie, cookie->def->name);

	/* fast path: this cookie may already have an object in this cache */
	spin_lock(&cookie->lock);
	hlist_for_each_entry(object, _n, &cookie->backing_objects,
			     cookie_link) {
		if (object->cache == cache)
			goto object_already_extant;
	}
	spin_unlock(&cookie->lock);

	/* ask the cache to allocate an object (we may end up with duplicate
	 * objects at this stage, but we sort that out later) */
	fscache_stat(&fscache_n_cop_alloc_object);
	object = cache->ops->alloc_object(cache, cookie);
	fscache_stat_d(&fscache_n_cop_alloc_object);
	if (IS_ERR(object)) {
		fscache_stat(&fscache_n_object_no_alloc);
		ret = PTR_ERR(object);
		goto error;
	}

	fscache_stat(&fscache_n_object_alloc);

	object->debug_id = atomic_inc_return(&fscache_object_debug_id);

	_debug("ALLOC OBJ%x: %s {%lx}",
	       object->debug_id, cookie->def->name, object->events);

	/* ensure the whole ancestor chain has objects before attaching */
	ret = fscache_alloc_object(cache, cookie->parent);
	if (ret < 0)
		goto error_put;

	/* only attach if we managed to allocate all we needed, otherwise
	 * discard the object we just allocated and instead use the one
	 * attached to the cookie */
	if (fscache_attach_object(cookie, object) < 0) {
		fscache_stat(&fscache_n_cop_put_object);
		cache->ops->put_object(object);
		fscache_stat_d(&fscache_n_cop_put_object);
	}

	_leave(" = 0");
	return 0;

object_already_extant:
	/* an object exists; usable only if it isn't on its way out */
	ret = -ENOBUFS;
	if (object->state >= FSCACHE_OBJECT_DYING) {
		spin_unlock(&cookie->lock);
		goto error;
	}
	spin_unlock(&cookie->lock);
	_leave(" = 0 [found]");
	return 0;

error_put:
	fscache_stat(&fscache_n_cop_put_object);
	cache->ops->put_object(object);
	fscache_stat_d(&fscache_n_cop_put_object);
error:
	_leave(" = %d", ret);
	return ret;
}
/*
 * attach a cache object to a cookie
 *
 * Links the object onto the cookie's backing list, pins the matching
 * parent object, and puts the object on the cache's object list.
 * Returns 0 on success, -EEXIST if the cookie already has an object in
 * this cache, or -ENOBUFS if that existing (or the parent's) object is
 * dying.  Lock order: cookie->lock, then parent cookie's lock (nested),
 * then object locks.
 */
static int fscache_attach_object(struct fscache_cookie *cookie,
				 struct fscache_object *object)
{
	struct fscache_object *p;
	struct fscache_cache *cache = object->cache;
	struct hlist_node *_n;
	int ret;

	_enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);

	spin_lock(&cookie->lock);

	/* there may be multiple initial creations of this object, but we only
	 * want one */
	ret = -EEXIST;
	hlist_for_each_entry(p, _n, &cookie->backing_objects, cookie_link) {
		if (p->cache == object->cache) {
			if (p->state >= FSCACHE_OBJECT_DYING)
				ret = -ENOBUFS;
			goto cant_attach_object;
		}
	}

	/* pin the parent object */
	spin_lock_nested(&cookie->parent->lock, 1);
	hlist_for_each_entry(p, _n, &cookie->parent->backing_objects,
			     cookie_link) {
		if (p->cache == object->cache) {
			if (p->state >= FSCACHE_OBJECT_DYING) {
				ret = -ENOBUFS;
				spin_unlock(&cookie->parent->lock);
				goto cant_attach_object;
			}
			object->parent = p;
			spin_lock(&p->lock);
			p->n_children++;
			spin_unlock(&p->lock);
			break;
		}
	}
	spin_unlock(&cookie->parent->lock);

	/* attach to the cache's object list */
	if (list_empty(&object->cache_link)) {
		spin_lock(&cache->object_list_lock);
		list_add(&object->cache_link, &cache->object_list);
		spin_unlock(&cache->object_list_lock);
	}

	/* attach to the cookie */
	object->cookie = cookie;
	atomic_inc(&cookie->usage);
	hlist_add_head(&object->cookie_link, &cookie->backing_objects);

	fscache_objlist_add(object);
	ret = 0;

cant_attach_object:
	spin_unlock(&cookie->lock);
	_leave(" = %d", ret);
	return ret;
}
/*
* update the index entries backing a cookie
*/
void __fscache_update_cookie(struct fscache_cookie *cookie)
{
struct fscache_object *object;
struct hlist_node *_p;
fscache_stat(&fscache_n_updates);
if (!cookie) {
fscache_stat(&fscache_n_updates_null);
_leave(" [no cookie]");
return;
}
_enter("{%s}", cookie->def->name);
BUG_ON(!cookie->def->get_aux);
spin_lock(&cookie->lock);
/* update the index entry on disk in each cache backing this cookie */
hlist_for_each_entry(object, _p,
&cookie->backing_objects, cookie_link) {
fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
}
spin_unlock(&cookie->lock);
_leave("");
}
EXPORT_SYMBOL(__fscache_update_cookie);
/*
 * release a cookie back to the cache
 * - the object will be marked as recyclable on disk if retire is true
 * - all dependents of this cookie must have already been unregistered
 *   (indices/files/pages)
 *
 * Detaches every backing object (raising RETIRE or RELEASE on it),
 * severs the netfs links, and drops the caller's usage reference.
 */
void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
{
	struct fscache_cache *cache;
	struct fscache_object *object;
	unsigned long event;

	fscache_stat(&fscache_n_relinquishes);
	if (retire)
		fscache_stat(&fscache_n_relinquishes_retire);

	if (!cookie) {
		fscache_stat(&fscache_n_relinquishes_null);
		_leave(" [no cookie]");
		return;
	}

	_enter("%p{%s,%p},%d",
	       cookie, cookie->def->name, cookie->netfs_data, retire);

	/* relinquishing with live children is a netfs API violation */
	if (atomic_read(&cookie->n_children) != 0) {
		printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n",
		       cookie->def->name);
		BUG();
	}

	/* wait for the cookie to finish being instantiated (or to fail) */
	if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
		fscache_stat(&fscache_n_relinquishes_waitcrt);
		wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}

	event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;

	spin_lock(&cookie->lock);

	/* break links with all the active objects */
	while (!hlist_empty(&cookie->backing_objects)) {
		object = hlist_entry(cookie->backing_objects.first,
				     struct fscache_object,
				     cookie_link);

		_debug("RELEASE OBJ%x", object->debug_id);

		/* detach each cache object from the object cookie */
		spin_lock(&object->lock);
		hlist_del_init(&object->cookie_link);

		cache = object->cache;
		object->cookie = NULL;
		fscache_raise_event(object, event);
		spin_unlock(&object->lock);

		/* drop the usage reference the object held on the cookie;
		 * the caller's own reference keeps the count above zero */
		if (atomic_dec_and_test(&cookie->usage))
			/* the cookie refcount shouldn't be reduced to 0 yet */
			BUG();
	}

	/* detach pointers back to the netfs */
	cookie->netfs_data	= NULL;
	cookie->def		= NULL;

	spin_unlock(&cookie->lock);

	if (cookie->parent) {
		ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
		ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0);
		atomic_dec(&cookie->parent->n_children);
	}

	/* finally dispose of the cookie */
	ASSERTCMP(atomic_read(&cookie->usage), >, 0);
	fscache_cookie_put(cookie);

	_leave("");
}
EXPORT_SYMBOL(__fscache_relinquish_cookie);
/*
 * destroy a cookie
 */
void __fscache_cookie_put(struct fscache_cookie *cookie)
{
	struct fscache_cookie *parent;

	_enter("%p", cookie);

	/* free the cookie, then walk up the parent chain freeing each
	 * ancestor whose last usage reference we drop along the way */
	parent = cookie;
	while (parent) {
		cookie = parent;
		_debug("FREE COOKIE %p", cookie);
		parent = cookie->parent;
		BUG_ON(!hlist_empty(&cookie->backing_objects));
		kmem_cache_free(fscache_cookie_jar, cookie);
		if (parent) {
			BUG_ON(atomic_read(&parent->usage) <= 0);
			if (!atomic_dec_and_test(&parent->usage))
				break;
		}
	}

	_leave("");
}
| gpl-2.0 |
syhost/android_kernel_oppo_find7a | drivers/net/wireless/prima/CORE/MAC/src/pe/lim/limFT.c | 64 | 59444 | /*
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef WLAN_FEATURE_VOWIFI_11R
/**=========================================================================
\brief implementation for PE 11r VoWiFi FT Protocol
Copyright 2008 (c) Qualcomm Technologies, Inc. All Rights Reserved.
Qualcomm Technologies Confidential and Proprietary.
========================================================================*/
/* $Header$ */
/*--------------------------------------------------------------------------
Include Files
------------------------------------------------------------------------*/
#include <limSendMessages.h>
#include <limTypes.h>
#include <limFT.h>
#include <limFTDefs.h>
#include <limUtils.h>
#include <limPropExtsUtils.h>
#include <limAssocUtils.h>
#include <limSession.h>
#include <limAdmitControl.h>
#include "wmmApsd.h"
#define LIM_FT_RIC_BA_SSN 1
#define LIM_FT_RIC_BA_DIALOG_TOKEN_TID_0 248
#define LIM_FT_RIC_DESCRIPTOR_RESOURCE_TYPE_BA 1
#define LIM_FT_RIC_DESCRIPTOR_MAX_VAR_DATA_LEN 255
/*--------------------------------------------------------------------------
   Initialize the FT variables.
   ------------------------------------------------------------------------*/
void limFTOpen(tpAniSirGlobal pMac)
{
    /* Start with a clean FT pre-auth context: no saved session pointer
     * and no outstanding pre-auth request. */
    pMac->ft.ftPEContext.psavedsessionEntry = NULL;
    pMac->ft.ftPEContext.pFTPreAuthReq = NULL;
}
/*--------------------------------------------------------------------------
   Cleanup FT variables.

   Frees the saved pre-auth request (including its embedded BSS
   description), drops the saved-session pointer, deletes the pre-auth
   session if it is still in the reassoc-wait state, and releases any
   pending ADD_BSS / ADD_STA requests. Resets ftPreAuthStatus to success.
   ------------------------------------------------------------------------*/
void limFTCleanup(tpAniSirGlobal pMac)
{
    if (pMac->ft.ftPEContext.pFTPreAuthReq)
    {
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
        PELOGE(limLog( pMac, LOGE, "%s: Freeing pFTPreAuthReq= %p",
            __func__, pMac->ft.ftPEContext.pFTPreAuthReq);)
#endif
        /* free the embedded BSS description before the request itself */
        if (pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription)
        {
            vos_mem_free(pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription);
            pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription = NULL;
        }
        vos_mem_free(pMac->ft.ftPEContext.pFTPreAuthReq);
        pMac->ft.ftPEContext.pFTPreAuthReq = NULL;
    }

    // This is the old session, should be deleted else where.
    // We should not be cleaning it here, just set it to NULL.
    if (pMac->ft.ftPEContext.psavedsessionEntry)
    {
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
        PELOGE(limLog( pMac, LOGE, "%s: Setting psavedsessionEntry= %p to NULL",
            __func__, pMac->ft.ftPEContext.psavedsessionEntry);)
#endif
        pMac->ft.ftPEContext.psavedsessionEntry = NULL;
    }

    // This is the extra session we added as part of Auth resp
    // clean it up.
    if (pMac->ft.ftPEContext.pftSessionEntry)
    {
        if ((((tpPESession)(pMac->ft.ftPEContext.pftSessionEntry))->valid) &&
            (((tpPESession)(pMac->ft.ftPEContext.pftSessionEntry))->limSmeState == eLIM_SME_WT_REASSOC_STATE))
        {
            PELOGE(limLog( pMac, LOGE, "%s: Deleting Preauth Session %d", __func__, ((tpPESession)pMac->ft.ftPEContext.pftSessionEntry)->peSessionId);)
            peDeleteSession(pMac, pMac->ft.ftPEContext.pftSessionEntry);
        }
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
        /* Fix: this message used to claim it was setting psavedsessionEntry
         * (already NULL at this point) - log the pointer actually being
         * cleared, and do so before the store so the value is meaningful. */
        PELOGE(limLog( pMac, LOGE, "%s: Setting pftSessionEntry= %p to NULL",
            __func__, pMac->ft.ftPEContext.pftSessionEntry);)
#endif
        pMac->ft.ftPEContext.pftSessionEntry = NULL;
    }

    if (pMac->ft.ftPEContext.pAddBssReq)
    {
        vos_mem_free(pMac->ft.ftPEContext.pAddBssReq);
        pMac->ft.ftPEContext.pAddBssReq = NULL;
    }

    if (pMac->ft.ftPEContext.pAddStaReq)
    {
        vos_mem_free(pMac->ft.ftPEContext.pAddStaReq);
        pMac->ft.ftPEContext.pAddStaReq = NULL;
    }

    pMac->ft.ftPEContext.ftPreAuthStatus = eSIR_SUCCESS;
}
/*--------------------------------------------------------------------------
   Init FT variables.

   Resets the FT pre-auth context before a fresh pre-auth cycle: frees
   the saved pre-auth request, drops the saved-session pointer, clears
   the pre-auth session pointer (without deleting the session), and
   frees pending ADD_BSS / ADD_STA requests.

   NOTE(review): this is a near-duplicate of limFTCleanup() above; the
   key difference is that limFTCleanup() may delete the pre-auth session
   whereas this function only drops the pointer.
   ------------------------------------------------------------------------*/
void limFTInit(tpAniSirGlobal pMac)
{
    if (pMac->ft.ftPEContext.pFTPreAuthReq)
    {
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
        PELOGE(limLog( pMac, LOGE, "%s: Freeing pFTPreAuthReq= %p",
            __func__, pMac->ft.ftPEContext.pFTPreAuthReq);)
#endif
        /* free the embedded BSS description before the request itself */
        if (pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription)
        {
            vos_mem_free(pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription);
            pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription = NULL;
        }
        vos_mem_free(pMac->ft.ftPEContext.pFTPreAuthReq);
        pMac->ft.ftPEContext.pFTPreAuthReq = NULL;
    }

    // This is the old session, should be deleted else where.
    // We should not be cleaning it here, just set it to NULL.
    if (pMac->ft.ftPEContext.psavedsessionEntry)
    {
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
        PELOGE(limLog( pMac, LOGE, "%s: Setting psavedsessionEntry= %p to NULL",
            __func__, pMac->ft.ftPEContext.psavedsessionEntry);)
#endif
        pMac->ft.ftPEContext.psavedsessionEntry = NULL;
    }

    // This is the extra session we added as part of Auth resp
    // clean it up.
    if (pMac->ft.ftPEContext.pftSessionEntry)
    {
        /* Cannot delete sessions across associations */
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
        /* NOTE(review): message says "Deleting" but only the pointer is
         * dropped here - the session itself is not deleted */
        PELOGE(limLog( pMac, LOGE, "%s: Deleting session = %p ",
            __func__, pMac->ft.ftPEContext.pftSessionEntry);)
#endif
        pMac->ft.ftPEContext.pftSessionEntry = NULL;
    }

    if (pMac->ft.ftPEContext.pAddBssReq)
    {
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
        PELOGE(limLog( pMac, LOGE, "%s: Freeing AddBssReq = %p ",
            __func__, pMac->ft.ftPEContext.pAddBssReq);)
#endif
        vos_mem_free(pMac->ft.ftPEContext.pAddBssReq);
        pMac->ft.ftPEContext.pAddBssReq = NULL;
    }

    if (pMac->ft.ftPEContext.pAddStaReq)
    {
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
        PELOGE(limLog( pMac, LOGE, "%s: Freeing AddStaReq = %p ",
            __func__, pMac->ft.ftPEContext.pAddStaReq);)
#endif
        vos_mem_free(pMac->ft.ftPEContext.pAddStaReq);
        pMac->ft.ftPEContext.pAddStaReq = NULL;
    }

    pMac->ft.ftPEContext.ftPreAuthStatus = eSIR_SUCCESS;
}
/*------------------------------------------------------------------
 *
 * This is the handler after suspending the link.
 * We suspend the link and then now proceed to switch channel.
 *
 *------------------------------------------------------------------*/
void FTPreAuthSuspendLinkHandler(tpAniSirGlobal pMac, eHalStatus status, tANI_U32 *data)
{
    tpPESession psessionEntry = (tpPESession)data;

    /* Link suspension failed: report pre-auth failure to SME and bail. */
    if (eHAL_STATUS_SUCCESS != status)
    {
        PELOGE(limLog( pMac, LOGE, "%s: Returning ", __func__);)
        limPostFTPreAuthRsp(pMac, eSIR_FAILURE, NULL, 0, psessionEntry);
        return;
    }

    /* Link is quiesced; hop to the pre-auth target's channel. The actual
     * authentication is driven from the limPerformFTPreAuth callback. */
    if (pMac->ft.ftPEContext.pFTPreAuthReq && psessionEntry)
    {
        limChangeChannelWithCallback(pMac,
            pMac->ft.ftPEContext.pFTPreAuthReq->preAuthchannelNum,
            limPerformFTPreAuth, NULL, psessionEntry);
        return;
    }

    /* Sanity checks failed - report failure. */
    limPostFTPreAuthRsp(pMac, eSIR_FAILURE, NULL, 0, psessionEntry);
}
/*--------------------------------------------------------------------------
  In this function, we process the FT Pre Auth Req.

  We receive Pre-Auth
  Suspend link
  Register a call back
  In the call back, we will need to accept frames from the new bssid
  Send out the auth req to new AP.
  Start timer and when the timer is done or if we receive the Auth response
  We change channel
  Resume link

  Returns TRUE when the message buffer was consumed here (error path),
  FALSE (bufConsumed) otherwise.
  ------------------------------------------------------------------------*/
int limProcessFTPreAuthReq(tpAniSirGlobal pMac, tpSirMsgQ pMsg)
{
    int bufConsumed = FALSE;
    tpPESession psessionEntry;
    tANI_U8 sessionId;

    // Now we are starting fresh make sure all's cleanup.
    limFTInit(pMac);

    // Can set it only after sending auth
    pMac->ft.ftPEContext.ftPreAuthStatus = eSIR_FAILURE;

    /* NOTE(review): limFTInit() above has just freed and NULLed
     * pFTPreAuthReq, so this branch looks like dead code - confirm. */
    if( pMac->ft.ftPEContext.pFTPreAuthReq &&
        pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription)
    {
        vos_mem_free(pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription);
        pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription = NULL;
    }

    // We need information from the Pre-Auth Req. Lets save that
    // (the FT context takes over pMsg->bodyptr from here on)
    pMac->ft.ftPEContext.pFTPreAuthReq = (tpSirFTPreAuthReq)pMsg->bodyptr;
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
    PELOGE(limLog( pMac, LOG1, "%s: PE Auth ft_ies_length=%02x%02x%02x", __func__,
        pMac->ft.ftPEContext.pFTPreAuthReq->ft_ies[0],
        pMac->ft.ftPEContext.pFTPreAuthReq->ft_ies[1],
        pMac->ft.ftPEContext.pFTPreAuthReq->ft_ies[2]);)
#endif

    // Get the current session entry
    psessionEntry = peFindSessionByBssid(pMac,
        pMac->ft.ftPEContext.pFTPreAuthReq->currbssId, &sessionId);
    if (psessionEntry == NULL)
    {
        PELOGE(limLog( pMac, LOGE, "%s: Unable to find session for the following bssid",
            __func__);)
        limPrintMacAddr( pMac, pMac->ft.ftPEContext.pFTPreAuthReq->currbssId, LOGE );

        // Post the FT Pre Auth Response to SME
        limPostFTPreAuthRsp(pMac, eSIR_FAILURE, NULL, 0, NULL);

        /* NOTE(review): only the embedded BSS description is freed here;
         * the request structure (pMsg->bodyptr) is presumably released by
         * the caller once TRUE is returned - confirm against the caller. */
        if (pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription)
        {
            vos_mem_free(pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription);
            pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription = NULL;
        }
        pMac->ft.ftPEContext.pFTPreAuthReq = NULL;
        return TRUE;
    }

#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    limDiagEventReport(pMac, WLAN_PE_DIAG_PRE_AUTH_REQ_EVENT, psessionEntry, 0, 0);
#endif

    // Dont need to suspend if APs are in same channel
    if (psessionEntry->currentOperChannel != pMac->ft.ftPEContext.pFTPreAuthReq->preAuthchannelNum)
    {
        // Need to suspend link only if the channels are different
        PELOG2(limLog(pMac,LOG2,"%s: Performing pre-auth on different"
               " channel (session %p)", __func__, psessionEntry);)
        limSuspendLink(pMac, eSIR_CHECK_ROAMING_SCAN, FTPreAuthSuspendLinkHandler,
                       (tANI_U32 *)psessionEntry);
    }
    else
    {
        PELOG2(limLog(pMac,LOG2,"%s: Performing pre-auth on same"
               " channel (session %p)", __func__, psessionEntry);)
        // We are in the same channel. Perform pre-auth
        limPerformFTPreAuth(pMac, eHAL_STATUS_SUCCESS, NULL, psessionEntry);
    }

    return bufConsumed;
}
/*------------------------------------------------------------------
 * Send the Auth1
 * Receive back Auth2
 *
 * Runs once we are on the pre-auth target's channel (or immediately
 * when the target is on the current channel). Builds and sends the
 * first authentication frame and arms the pre-auth response timer.
 *------------------------------------------------------------------*/
void limPerformFTPreAuth(tpAniSirGlobal pMac, eHalStatus status, tANI_U32 *data,
    tpPESession psessionEntry)
{
    tSirMacAuthFrameBody authFrame;

    if (psessionEntry->is11Rconnection)
    {
        // Only 11r assoc has FT IEs.
        if (pMac->ft.ftPEContext.pFTPreAuthReq->ft_ies == NULL)
        {
            /* Fix: the "%s" conversion previously had no matching
             * argument - undefined behaviour in the printf-style logger. */
            PELOGE(limLog( pMac, LOGE, "%s: FTIEs for Auth Req Seq 1 is absent",
                   __func__);)
            return;
        }
    }
    if (status != eHAL_STATUS_SUCCESS)
    {
        /* Fix: same missing "%s" argument as above. */
        PELOGE(limLog( pMac, LOGE, "%s: Change channel not successful for FT pre-auth",
               __func__);)
        return;
    }

    // Remember the session we came from so it can be restored afterwards.
    pMac->ft.ftPEContext.psavedsessionEntry = psessionEntry;

#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
    PELOG2(limLog(pMac,LOG2,"Entered wait auth2 state for FT"
           " (old session %p)",
           pMac->ft.ftPEContext.psavedsessionEntry);)
#endif

    if (psessionEntry->is11Rconnection)
    {
        // Now we are on the right channel and need to send out Auth1 and
        // receive Auth2.
        authFrame.authAlgoNumber = eSIR_FT_AUTH; // Set the auth type to FT
    }
#if defined FEATURE_WLAN_CCX || defined FEATURE_WLAN_LFR
    else
    {
        // Will need to make isCCXconnection a enum may be for further
        // improvements to this to match this algorithm number
        authFrame.authAlgoNumber = eSIR_OPEN_SYSTEM; // For now if its CCX and 11r FT.
    }
#endif
    /* NOTE(review): with neither CCX nor LFR compiled in, a non-11r
     * connection would leave authAlgoNumber uninitialized here - confirm
     * that configuration cannot reach this function. */
    authFrame.authTransactionSeqNumber = SIR_MAC_AUTH_FRAME_1;
    authFrame.authStatusCode = 0;

    // Start timer here to come back to operating channel.
    pMac->lim.limTimers.gLimFTPreAuthRspTimer.sessionId = psessionEntry->peSessionId;
    if(TX_SUCCESS != tx_timer_activate(&pMac->lim.limTimers.gLimFTPreAuthRspTimer))
    {
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
        PELOGE(limLog( pMac, LOGE, "%s: FT Auth Rsp Timer Start Failed", __func__);)
#endif
    }
    MTRACE(macTrace(pMac, TRACE_CODE_TIMER_ACTIVATE, psessionEntry->peSessionId, eLIM_FT_PREAUTH_RSP_TIMER));
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
    PELOGE(limLog( pMac, LOG1, "%s: FT Auth Rsp Timer Started", __func__);)
#endif

    limSendAuthMgmtFrame(pMac, &authFrame,
        pMac->ft.ftPEContext.pFTPreAuthReq->preAuthbssId,
        LIM_NO_WEP_IN_FC, psessionEntry);

    return;
}
/*------------------------------------------------------------------
 *
 * Create the new Add Bss Req to the new AP.
 * This will be used when we are ready to FT to the new AP.
 * The newly created ft Session entry is passed to this function
 *
 * The request is not dispatched; it is parked in
 * pMac->ft.ftPEContext.pAddBssReq for use at reassociation time.
 * Returns 0 on success or eSIR_MEM_ALLOC_FAILED.
 *
 *------------------------------------------------------------------*/
tSirRetStatus limFTPrepareAddBssReq( tpAniSirGlobal pMac,
    tANI_U8 updateEntry, tpPESession pftSessionEntry,
    tpSirBssDescription bssDescription )
{
    tpAddBssParams pAddBssParams = NULL;
    tANI_U8 i;
    tANI_U8 chanWidthSupp = 0;
    tSchBeaconStruct *pBeaconStruct;

    pBeaconStruct = vos_mem_malloc(sizeof(tSchBeaconStruct));
    if (NULL == pBeaconStruct)
    {
        limLog(pMac, LOGE, FL("Unable to allocate memory for creating ADD_BSS") );
        return eSIR_MEM_ALLOC_FAILED;
    }

    // Package SIR_HAL_ADD_BSS_REQ message parameters
    pAddBssParams = vos_mem_malloc(sizeof( tAddBssParams ));
    if (NULL == pAddBssParams)
    {
        vos_mem_free(pBeaconStruct);
        limLog( pMac, LOGP,
                FL( "Unable to allocate memory for creating ADD_BSS" ));
        return (eSIR_MEM_ALLOC_FAILED);
    }
    vos_mem_set((tANI_U8 *) pAddBssParams, sizeof( tAddBssParams ), 0);

    // Parse the target AP's beacon/probe-response IEs so the BSS and STA
    // parameters below can be derived from what the AP advertises.
    limExtractApCapabilities( pMac,
        (tANI_U8 *) bssDescription->ieFields,
        limGetIElenFromBssDescription( bssDescription ), pBeaconStruct );

    if (pMac->lim.gLimProtectionControl != WNI_CFG_FORCE_POLICY_PROTECTION_DISABLE)
        limDecideStaProtectionOnAssoc(pMac, pBeaconStruct, pftSessionEntry);

    vos_mem_copy(pAddBssParams->bssId, bssDescription->bssId,
                 sizeof(tSirMacAddr));

    // Fill in tAddBssParams selfMacAddr
    vos_mem_copy(pAddBssParams->selfMacAddr, pftSessionEntry->selfMacAddr,
                 sizeof(tSirMacAddr));

    pAddBssParams->bssType = pftSessionEntry->bssType;//eSIR_INFRASTRUCTURE_MODE;
    pAddBssParams->operMode = BSS_OPERATIONAL_MODE_STA;

    pAddBssParams->beaconInterval = bssDescription->beaconInterval;

    pAddBssParams->dtimPeriod = pBeaconStruct->tim.dtimPeriod;
    pAddBssParams->updateBss = updateEntry;

    pAddBssParams->cfParamSet.cfpCount = pBeaconStruct->cfParamSet.cfpCount;
    pAddBssParams->cfParamSet.cfpPeriod = pBeaconStruct->cfParamSet.cfpPeriod;
    pAddBssParams->cfParamSet.cfpMaxDuration = pBeaconStruct->cfParamSet.cfpMaxDuration;
    pAddBssParams->cfParamSet.cfpDurRemaining = pBeaconStruct->cfParamSet.cfpDurRemaining;

    pAddBssParams->rateSet.numRates = pBeaconStruct->supportedRates.numRates;
    vos_mem_copy(pAddBssParams->rateSet.rate,
                 pBeaconStruct->supportedRates.rate, pBeaconStruct->supportedRates.numRates);

    pAddBssParams->nwType = bssDescription->nwType;

    pAddBssParams->shortSlotTimeSupported = (tANI_U8)pBeaconStruct->capabilityInfo.shortSlotTime;
    pAddBssParams->llaCoexist = (tANI_U8) pftSessionEntry->beaconParams.llaCoexist;
    pAddBssParams->llbCoexist = (tANI_U8) pftSessionEntry->beaconParams.llbCoexist;
    pAddBssParams->llgCoexist = (tANI_U8) pftSessionEntry->beaconParams.llgCoexist;
    pAddBssParams->ht20Coexist = (tANI_U8) pftSessionEntry->beaconParams.ht20Coexist;

    // Use the advertised capabilities from the received beacon/PR
    if (IS_DOT11_MODE_HT(pftSessionEntry->dot11mode) && ( pBeaconStruct->HTCaps.present ))
    {
        pAddBssParams->htCapable = pBeaconStruct->HTCaps.present;

        if ( pBeaconStruct->HTInfo.present )
        {
            pAddBssParams->htOperMode = (tSirMacHTOperatingMode)pBeaconStruct->HTInfo.opMode;
            pAddBssParams->dualCTSProtection = ( tANI_U8 ) pBeaconStruct->HTInfo.dualCTSProtection;

            chanWidthSupp = limGetHTCapability( pMac, eHT_SUPPORTED_CHANNEL_WIDTH_SET, pftSessionEntry);
            if( (pBeaconStruct->HTCaps.supportedChannelWidthSet) &&
                (chanWidthSupp) )
            {
                pAddBssParams->txChannelWidthSet = ( tANI_U8 ) pBeaconStruct->HTInfo.recommendedTxWidthSet;
                pAddBssParams->currentExtChannel = pBeaconStruct->HTInfo.secondaryChannelOffset;
            }
            else
            {
                pAddBssParams->txChannelWidthSet = WNI_CFG_CHANNEL_BONDING_MODE_DISABLE;
                pAddBssParams->currentExtChannel = PHY_SINGLE_CHANNEL_CENTERED;
            }
            pAddBssParams->llnNonGFCoexist = (tANI_U8)pBeaconStruct->HTInfo.nonGFDevicesPresent;
            pAddBssParams->fLsigTXOPProtectionFullSupport = (tANI_U8)pBeaconStruct->HTInfo.lsigTXOPProtectionFullSupport;
            pAddBssParams->fRIFSMode = pBeaconStruct->HTInfo.rifsMode;
        }
    }

    pAddBssParams->currentOperChannel = bssDescription->channelId;
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
    limLog( pMac, LOG1, FL( "SIR_HAL_ADD_BSS_REQ with channel = %d..." ),
        pAddBssParams->currentOperChannel);
#endif

    // Populate the STA-related parameters here
    // Note that the STA here refers to the AP
    {
        pAddBssParams->staContext.staType = STA_ENTRY_OTHER; // Identifying AP as an STA

        vos_mem_copy(pAddBssParams->staContext.bssId,
                     bssDescription->bssId,
                     sizeof(tSirMacAddr));
        pAddBssParams->staContext.listenInterval = bssDescription->beaconInterval;

        pAddBssParams->staContext.assocId = 0; // Is SMAC OK with this?
        pAddBssParams->staContext.uAPSD = 0;
        pAddBssParams->staContext.maxSPLen = 0;
        pAddBssParams->staContext.shortPreambleSupported = (tANI_U8)pBeaconStruct->capabilityInfo.shortPreamble;
        pAddBssParams->staContext.updateSta = updateEntry;
        pAddBssParams->staContext.encryptType = pftSessionEntry->encryptType;

        if (IS_DOT11_MODE_HT(pftSessionEntry->dot11mode) && ( pBeaconStruct->HTCaps.present ))
        {
            pAddBssParams->staContext.us32MaxAmpduDuration = 0;
            pAddBssParams->staContext.htCapable = 1;
            pAddBssParams->staContext.greenFieldCapable = ( tANI_U8 ) pBeaconStruct->HTCaps.greenField;
            pAddBssParams->staContext.lsigTxopProtection = ( tANI_U8 ) pBeaconStruct->HTCaps.lsigTXOPProtection;
            if( (pBeaconStruct->HTCaps.supportedChannelWidthSet) &&
                (chanWidthSupp) )
            {
                pAddBssParams->staContext.txChannelWidthSet = ( tANI_U8 )pBeaconStruct->HTInfo.recommendedTxWidthSet;
            }
            else
            {
                pAddBssParams->staContext.txChannelWidthSet = WNI_CFG_CHANNEL_BONDING_MODE_DISABLE;
            }
            pAddBssParams->staContext.mimoPS = (tSirMacHTMIMOPowerSaveState)pBeaconStruct->HTCaps.mimoPowerSave;
            pAddBssParams->staContext.delBASupport = ( tANI_U8 ) pBeaconStruct->HTCaps.delayedBA;
            pAddBssParams->staContext.maxAmsduSize = ( tANI_U8 ) pBeaconStruct->HTCaps.maximalAMSDUsize;
            pAddBssParams->staContext.maxAmpduDensity = pBeaconStruct->HTCaps.mpduDensity;
            pAddBssParams->staContext.fDsssCckMode40Mhz = (tANI_U8)pBeaconStruct->HTCaps.dsssCckMode40MHz;
            pAddBssParams->staContext.fShortGI20Mhz = (tANI_U8)pBeaconStruct->HTCaps.shortGI20MHz;
            pAddBssParams->staContext.fShortGI40Mhz = (tANI_U8)pBeaconStruct->HTCaps.shortGI40MHz;
            pAddBssParams->staContext.maxAmpduSize= pBeaconStruct->HTCaps.maxRxAMPDUFactor;

            if( pBeaconStruct->HTInfo.present )
                pAddBssParams->staContext.rifsMode = pBeaconStruct->HTInfo.rifsMode;
        }

        if ((pftSessionEntry->limWmeEnabled && pBeaconStruct->wmeEdcaPresent) ||
            (pftSessionEntry->limQosEnabled && pBeaconStruct->edcaPresent))
            pAddBssParams->staContext.wmmEnabled = 1;
        else
            pAddBssParams->staContext.wmmEnabled = 0;

        //Update the rates
#ifdef WLAN_FEATURE_11AC
        limPopulatePeerRateSet(pMac, &pAddBssParams->staContext.supportedRates,
                               pBeaconStruct->HTCaps.supportedMCSSet,
                               false,pftSessionEntry,&pBeaconStruct->VHTCaps);
#else
        /* Fix: this branch referenced a non-existent local "beaconStruct";
         * the parsed IEs live behind the pBeaconStruct pointer, so the old
         * code failed to compile when WLAN_FEATURE_11AC was not defined. */
        limPopulatePeerRateSet(pMac, &pAddBssParams->staContext.supportedRates,
                               pBeaconStruct->HTCaps.supportedMCSSet, false,pftSessionEntry);
#endif
        limFillSupportedRatesInfo(pMac, NULL, &pAddBssParams->staContext.supportedRates,pftSessionEntry);
    }

    //Disable BA. It will be set as part of ADDBA negotiation.
    for( i = 0; i < STACFG_MAX_TC; i++ )
    {
        pAddBssParams->staContext.staTCParams[i].txUseBA = eBA_DISABLE;
        pAddBssParams->staContext.staTCParams[i].rxUseBA = eBA_DISABLE;
        pAddBssParams->staContext.staTCParams[i].txBApolicy = eBA_POLICY_IMMEDIATE;
        pAddBssParams->staContext.staTCParams[i].rxBApolicy = eBA_POLICY_IMMEDIATE;
    }

#if defined WLAN_FEATURE_VOWIFI
    pAddBssParams->maxTxPower = pftSessionEntry->maxTxPower;
#endif

    pAddBssParams->status = eHAL_STATUS_SUCCESS;
    pAddBssParams->respReqd = true;

    pAddBssParams->staContext.sessionId = pftSessionEntry->peSessionId;
    pAddBssParams->sessionId = pftSessionEntry->peSessionId;

    // Set a new state for MLME
    pftSessionEntry->limMlmState = eLIM_MLM_WT_ADD_BSS_RSP_FT_REASSOC_STATE;
    MTRACE(macTrace(pMac, TRACE_CODE_MLM_STATE, pftSessionEntry->peSessionId, eLIM_MLM_WT_ADD_BSS_RSP_FT_REASSOC_STATE));

    pAddBssParams->halPersona=(tANI_U8)pftSessionEntry->pePersona; //pass on the session persona to hal

    // Ownership of pAddBssParams transfers to the FT context; it is freed
    // by limFTCleanup()/limFTInit() or consumed at reassociation time.
    pMac->ft.ftPEContext.pAddBssReq = pAddBssParams;
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
    limLog( pMac, LOG1, FL( "Saving SIR_HAL_ADD_BSS_REQ for pre-auth ap..." ));
#endif

    vos_mem_free(pBeaconStruct);
    return 0;
}
/*------------------------------------------------------------------
 *
 * Setup the new session for the pre-auth AP.
 * Return the newly created session entry.
 *
 * Populates the pre-created FT session (ftPEContext.pftSessionEntry)
 * from the target AP's BSS description and the current session's
 * negotiated capabilities. Returns NULL only on allocation failure.
 *
 *------------------------------------------------------------------*/
tpPESession limFillFTSession(tpAniSirGlobal pMac,
    tpSirBssDescription pbssDescription, tpPESession psessionEntry)
{
    tpPESession pftSessionEntry;
    tANI_U8 currentBssUapsd;
    tPowerdBm localPowerConstraint;
    tPowerdBm regMax;
    tSchBeaconStruct *pBeaconStruct;

    pBeaconStruct = vos_mem_malloc(sizeof(tSchBeaconStruct));
    if (NULL == pBeaconStruct)
    {
        limLog(pMac, LOGE, FL("Unable to allocate memory for creating limFillFTSession") );
        return NULL;
    }

    /* Retrieve the session that has already been created and update the entry */
    pftSessionEntry = pMac->ft.ftPEContext.pftSessionEntry;
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG || defined FEATURE_WLAN_CCX || defined(FEATURE_WLAN_LFR)
    limPrintMacAddr(pMac, pbssDescription->bssId, LOG1);
#endif

    // Inherit the capabilities negotiated on the current connection.
    pftSessionEntry->dot11mode = psessionEntry->dot11mode;
    pftSessionEntry->htCapability = psessionEntry->htCapability;

    pftSessionEntry->limWmeEnabled = psessionEntry->limWmeEnabled;
    pftSessionEntry->limQosEnabled = psessionEntry->limQosEnabled;
    pftSessionEntry->limWsmEnabled = psessionEntry->limWsmEnabled;
    pftSessionEntry->lim11hEnable = psessionEntry->lim11hEnable;

    // Fields to be filled later
    pftSessionEntry->pLimJoinReq = NULL;
    pftSessionEntry->smeSessionId = 0;
    pftSessionEntry->transactionId = 0;

    limExtractApCapabilities( pMac,
        (tANI_U8 *) pbssDescription->ieFields,
        limGetIElenFromBssDescription( pbssDescription ),
        pBeaconStruct );

    pftSessionEntry->rateSet.numRates = pBeaconStruct->supportedRates.numRates;
    vos_mem_copy(pftSessionEntry->rateSet.rate,
        pBeaconStruct->supportedRates.rate, pBeaconStruct->supportedRates.numRates);

    pftSessionEntry->extRateSet.numRates = pBeaconStruct->extendedRates.numRates;
    vos_mem_copy(pftSessionEntry->extRateSet.rate,
        pBeaconStruct->extendedRates.rate, pftSessionEntry->extRateSet.numRates);

    pftSessionEntry->ssId.length = pBeaconStruct->ssId.length;
    vos_mem_copy(pftSessionEntry->ssId.ssId, pBeaconStruct->ssId.ssId,
        pftSessionEntry->ssId.length);

    // Self Mac
    sirCopyMacAddr(pftSessionEntry->selfMacAddr, psessionEntry->selfMacAddr);
    sirCopyMacAddr(pftSessionEntry->limReAssocbssId, pbssDescription->bssId);
#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG || defined FEATURE_WLAN_CCX || defined(FEATURE_WLAN_LFR)
    limPrintMacAddr(pMac, pftSessionEntry->limReAssocbssId, LOG1);
#endif

    /* Store beaconInterval */
    pftSessionEntry->beaconParams.beaconInterval = pbssDescription->beaconInterval;
    pftSessionEntry->bssType = psessionEntry->bssType;

    pftSessionEntry->statypeForBss = STA_ENTRY_PEER;
    pftSessionEntry->nwType = pbssDescription->nwType;

    /* Copy The channel Id to the session Table */
    pftSessionEntry->limReassocChannelId = pbssDescription->channelId;
    pftSessionEntry->currentOperChannel = pbssDescription->channelId;

    if (pftSessionEntry->bssType == eSIR_INFRASTRUCTURE_MODE)
    {
        pftSessionEntry->limSystemRole = eLIM_STA_ROLE;
    }
    else if(pftSessionEntry->bssType == eSIR_BTAMP_AP_MODE)
    {
        pftSessionEntry->limSystemRole = eLIM_BT_AMP_STA_ROLE;
    }
    else
    {
        /* Throw an error and return and make sure to delete the session.*/
        limLog(pMac, LOGE, FL("Invalid bss type"));
    }

    pftSessionEntry->limCurrentBssCaps = pbssDescription->capabilityInfo;
    pftSessionEntry->limReassocBssCaps = pbssDescription->capabilityInfo;
    if( pMac->roam.configParam.shortSlotTime &&
        SIR_MAC_GET_SHORT_SLOT_TIME(pftSessionEntry->limReassocBssCaps))
    {
        pftSessionEntry->shortSlotTimeSupported = TRUE;
    }

    regMax = cfgGetRegulatoryMaxTransmitPower( pMac, pftSessionEntry->currentOperChannel );
    localPowerConstraint = regMax;
    /* Fix: the "&curren" of "&currentBssUapsd" had been mangled into the
     * HTML entity U+00A4 ("¤tBssUapsd") by a bad encoding round-trip;
     * restore the intended address-of argument. */
    limExtractApCapability( pMac, (tANI_U8 *) pbssDescription->ieFields,
        limGetIElenFromBssDescription(pbssDescription),
        &pftSessionEntry->limCurrentBssQosCaps,
        &pftSessionEntry->limCurrentBssPropCap,
        &currentBssUapsd , &localPowerConstraint, psessionEntry);

    pftSessionEntry->limReassocBssQosCaps =
        pftSessionEntry->limCurrentBssQosCaps;
    pftSessionEntry->limReassocBssPropCap =
        pftSessionEntry->limCurrentBssPropCap;

#ifdef FEATURE_WLAN_CCX
    pftSessionEntry->maxTxPower = limGetMaxTxPower(regMax, localPowerConstraint, pMac->roam.configParam.nTxPowerCap);
#else
    pftSessionEntry->maxTxPower = VOS_MIN( regMax , (localPowerConstraint) );
#endif

#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
    limLog( pMac, LOG1, "%s: Regulatory max = %d, local power constraint = %d, ini tx power = %d, max tx = %d",
        __func__, regMax, localPowerConstraint, pMac->roam.configParam.nTxPowerCap, pftSessionEntry->maxTxPower );
#endif

    pftSessionEntry->limRFBand = limGetRFBand(pftSessionEntry->currentOperChannel);

    pftSessionEntry->limPrevSmeState = pftSessionEntry->limSmeState;
    pftSessionEntry->limSmeState = eLIM_SME_WT_REASSOC_STATE;
    MTRACE(macTrace(pMac, TRACE_CODE_SME_STATE, pftSessionEntry->peSessionId, pftSessionEntry->limSmeState));

    pftSessionEntry->encryptType = psessionEntry->encryptType;
#ifdef WLAN_FEATURE_11AC
    pftSessionEntry->vhtCapability = psessionEntry->vhtCapability;
    pftSessionEntry->vhtCapabilityPresentInBeacon = psessionEntry->vhtCapabilityPresentInBeacon;
#endif

    vos_mem_free(pBeaconStruct);
    return pftSessionEntry;
}
/*------------------------------------------------------------------
 *
 * Setup the session and the add bss req for the pre-auth AP.
 *
 *------------------------------------------------------------------*/
void limFTSetupAuthSession(tpAniSirGlobal pMac, tpPESession psessionEntry)
{
    tpPESession pftSessionEntry;

    // Prepare the session right now with as much as possible.
    pftSessionEntry = limFillFTSession(pMac,
        pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription, psessionEntry);
    if (pftSessionEntry == NULL)
        return;

    /* Carry the roaming-related flags over from the current session. */
    pftSessionEntry->is11Rconnection = psessionEntry->is11Rconnection;
#ifdef FEATURE_WLAN_CCX
    pftSessionEntry->isCCXconnection = psessionEntry->isCCXconnection;
#endif
#if defined WLAN_FEATURE_VOWIFI_11R || defined FEATURE_WLAN_CCX || defined(FEATURE_WLAN_LFR)
    pftSessionEntry->isFastTransitionEnabled = psessionEntry->isFastTransitionEnabled;
#endif
#ifdef FEATURE_WLAN_LFR
    pftSessionEntry->isFastRoamIniFeatureEnabled = psessionEntry->isFastRoamIniFeatureEnabled;
#endif

    /* Pre-build the ADD_BSS request for the target AP and remember the
     * session for the reassociation step. */
    limFTPrepareAddBssReq( pMac, FALSE, pftSessionEntry,
        pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription );
    pMac->ft.ftPEContext.pftSessionEntry = pftSessionEntry;
}
/*------------------------------------------------------------------
 * Resume Link Call Back
 *------------------------------------------------------------------*/
void limFTProcessPreAuthResult(tpAniSirGlobal pMac, eHalStatus status, tANI_U32 *data)
{
    tpPESession psessionEntry = (tpPESession)data;

    /* Nothing to do if the pre-auth request has already been torn down. */
    if (pMac->ft.ftPEContext.pFTPreAuthReq == NULL)
        return;

    /* On success, build the FT session and ADD_BSS for the target AP. */
    if (eSIR_SUCCESS == pMac->ft.ftPEContext.ftPreAuthStatus)
        limFTSetupAuthSession(pMac, psessionEntry);

    /* Forward the saved auth response and status up to SME. */
    limPostFTPreAuthRsp(pMac, pMac->ft.ftPEContext.ftPreAuthStatus,
        pMac->ft.ftPEContext.saved_auth_rsp,
        pMac->ft.ftPEContext.saved_auth_rsp_length, psessionEntry);
}
/*------------------------------------------------------------------
 * Resume Link Call Back
 *
 * Invoked once pre-auth has finished on the target channel: asks HAL
 * to restore the previous operating channel and resumes the link,
 * with limFTProcessPreAuthResult run when the link is back up.
 *------------------------------------------------------------------*/
void limPerformPostFTPreAuthAndChannelChange(tpAniSirGlobal pMac, eHalStatus status, tANI_U32 *data,
    tpPESession psessionEntry)
{
    //Set the resume channel to Any valid channel (invalid).
    //This will instruct HAL to set it to any previous valid channel.
    peSetResumeChannel(pMac, 0, 0);
    limResumeLink(pMac, limFTProcessPreAuthResult, (tANI_U32 *)psessionEntry);
}
/*------------------------------------------------------------------
 * Build a RIC (Resource Information Container) descriptor carrying a
 * Block-Ack resource request for the given TID into ric_ies, writing
 * the packed size through ieLength.
 *
 * NOTE: the implementation is compiled out (#if 0) - Block-Ack over
 * RIC is not supported yet, so this currently always returns
 * eSIR_FAILURE.
 *------------------------------------------------------------------*/
tSirRetStatus limCreateRICBlockAckIE(tpAniSirGlobal pMac, tANI_U8 tid, tCfgTrafficClass *pTrafficClass,
    tANI_U8 *ric_ies, tANI_U32 *ieLength)
{
    /* BlockACK + RIC is not supported now, TODO later to support this */
#if 0
    tDot11fIERICDataDesc ricIe;
    tDot11fFfBAStartingSequenceControl baSsnControl;
    tDot11fFfAddBAParameterSet baParamSet;
    tDot11fFfBATimeout baTimeout;

    vos_mem_zero(&ricIe, sizeof(tDot11fIERICDataDesc));
    vos_mem_zero(&baSsnControl, sizeof(tDot11fFfBAStartingSequenceControl));
    vos_mem_zero(&baParamSet, sizeof(tDot11fFfAddBAParameterSet));
    vos_mem_zero(&baTimeout, sizeof(tDot11fFfBATimeout));

    ricIe.present = 1;
    ricIe.RICData.present = 1;
    ricIe.RICData.resourceDescCount = 1;
    ricIe.RICData.Identifier = LIM_FT_RIC_BA_DIALOG_TOKEN_TID_0 + tid;
    ricIe.RICDescriptor.present = 1;
    ricIe.RICDescriptor.resourceType = LIM_FT_RIC_DESCRIPTOR_RESOURCE_TYPE_BA;
    baParamSet.tid = tid;
    baParamSet.policy = pTrafficClass->fTxBApolicy; // Immediate Block Ack
    baParamSet.bufferSize = pTrafficClass->txBufSize;
    vos_mem_copy((v_VOID_t *)&baTimeout, (v_VOID_t *)&pTrafficClass->tuTxBAWaitTimeout, sizeof(baTimeout));
    baSsnControl.fragNumber = 0;
    baSsnControl.ssn = LIM_FT_RIC_BA_SSN;
    /* pack the ADDBA parameter set, BA timeout and starting sequence
     * control fields into the descriptor's variable data, bounds-checked
     * against the descriptor buffer each time */
    if (ricIe.RICDescriptor.num_variableData < sizeof (ricIe.RICDescriptor.variableData)) {
        dot11fPackFfAddBAParameterSet(pMac, &baParamSet, &ricIe.RICDescriptor.variableData[ricIe.RICDescriptor.num_variableData]);
        //vos_mem_copy(&ricIe.RICDescriptor.variableData[ricIe.RICDescriptor.num_variableData], &baParamSet, sizeof(tDot11fFfAddBAParameterSet));
        ricIe.RICDescriptor.num_variableData += sizeof(tDot11fFfAddBAParameterSet);
    }
    if (ricIe.RICDescriptor.num_variableData < sizeof (ricIe.RICDescriptor.variableData)) {
        dot11fPackFfBATimeout(pMac, &baTimeout, &ricIe.RICDescriptor.variableData[ricIe.RICDescriptor.num_variableData]);
        //vos_mem_copy(&ricIe.RICDescriptor.variableData[ricIe.RICDescriptor.num_variableData], &baTimeout, sizeof(tDot11fFfBATimeout));
        ricIe.RICDescriptor.num_variableData += sizeof(tDot11fFfBATimeout);
    }
    if (ricIe.RICDescriptor.num_variableData < sizeof (ricIe.RICDescriptor.variableData)) {
        dot11fPackFfBAStartingSequenceControl(pMac, &baSsnControl, &ricIe.RICDescriptor.variableData[ricIe.RICDescriptor.num_variableData]);
        //vos_mem_copy(&ricIe.RICDescriptor.variableData[ricIe.RICDescriptor.num_variableData], &baSsnControl, sizeof(tDot11fFfBAStartingSequenceControl));
        ricIe.RICDescriptor.num_variableData += sizeof(tDot11fFfBAStartingSequenceControl);
    }
    return (tSirRetStatus) dot11fPackIeRICDataDesc(pMac, &ricIe, ric_ies, sizeof(tDot11fIERICDataDesc), ieLength);
#endif

    return eSIR_FAILURE;
}
/*
 * limFTFillRICBlockAckInfo() - build RIC Block Ack IEs for an FT pre-auth
 * response.
 *
 * Looks up the DPH entry of the saved session's BSSID, then for every
 * traffic class with a TX Block Ack agreement (fUseBATx) appends one RIC
 * descriptor IE (built by limCreateRICBlockAckIE) into the caller-supplied
 * ric_ies buffer.  *ric_ies_length is incremented by each IE's length.
 *
 * Returns the status of the last IE-creation attempt; per-TID failures are
 * only logged and do not stop the loop.
 */
tSirRetStatus limFTFillRICBlockAckInfo(tpAniSirGlobal pMac, tANI_U8 *ric_ies, tANI_U32 *ric_ies_length)
{
    tANI_U8 tid = 0;
    tpDphHashNode pSta;
    tANI_U16 numBA = 0, aid = 0;
    tpPESession psessionEntry = pMac->ft.ftPEContext.psavedsessionEntry;
    tANI_U32 offset = 0, ieLength = 0;
    tSirRetStatus status = eSIR_SUCCESS;

    // First, extract the DPH entry
    pSta = dphLookupHashEntry( pMac, pMac->ft.ftPEContext.pFTPreAuthReq->currbssId, &aid, &psessionEntry->dph.dphHashTable);
    if( NULL == pSta )
    {
        PELOGE(limLog( pMac, LOGE,
            FL( "STA context not found for saved session's BSSID " MAC_ADDRESS_STR ),
            MAC_ADDR_ARRAY(pMac->ft.ftPEContext.pFTPreAuthReq->currbssId));)
        return eSIR_FAILURE;
    }

    for (tid = 0; tid < STACFG_MAX_TC; tid++)
    {
        if (pSta->tcCfg[tid].fUseBATx)
        {
            status = limCreateRICBlockAckIE(pMac, tid, &pSta->tcCfg[tid], ric_ies + offset, &ieLength);
            if (eSIR_SUCCESS == status)
            {
                // TODO RIC
                /* NOTE(review): when ieLength exceeds MAX_FTIE_SIZE the IE
                 * has already been written and a success status is returned
                 * without updating *ric_ies_length -- confirm intended. */
                if ( ieLength > MAX_FTIE_SIZE )
                {
                    ieLength = 0;
                    return status;
                }
                offset += ieLength;
                *ric_ies_length += ieLength;
                numBA++;
            }
            else
            {
                PELOGE(limLog(pMac, LOGE, FL("BA RIC IE creation for TID %d failed with status %d"), tid, status);)
            }
        }
    }

    PELOGE(limLog(pMac, LOGE, FL("Number of BA RIC IEs created = %d: Total length = %d"), numBA, *ric_ies_length);)
    return status;
}
/*------------------------------------------------------------------
*
* Will post pre auth response to SME.
*
*------------------------------------------------------------------*/
/*
 * limPostFTPreAuthRsp() - package the FT pre-auth result and post it to SME.
 *
 * Allocates a tSirFTPreAuthRsp, fills in the status, the pre-auth target
 * BSSID and (when it fits) the auth-response FT IEs, then posts it as an
 * eWNI_SME_FT_PRE_AUTH_RSP message.  Ownership of the allocated response
 * transfers to the message queue via limSysProcessMmhMsgApi().
 *
 * Fixes: the debug log's format string had two conversions ("%s" and "%p")
 * but only one argument was passed (undefined printf-style behavior);
 * __func__ is now supplied.  The redundant second zeroing of the response
 * (vos_mem_set after vos_mem_zero) was removed.
 */
void limPostFTPreAuthRsp(tpAniSirGlobal pMac, tSirRetStatus status,
    tANI_U8 *auth_rsp, tANI_U16 auth_rsp_length,
    tpPESession psessionEntry)
{
    tpSirFTPreAuthRsp pFTPreAuthRsp;
    tSirMsgQ mmhMsg;
    tANI_U16 rspLen = sizeof(tSirFTPreAuthRsp);
    // TODO: RIC Support
    //tSirRetStatus sirStatus = eSIR_SUCCESS;

    pFTPreAuthRsp = (tpSirFTPreAuthRsp)vos_mem_malloc(rspLen);
    if (NULL == pFTPreAuthRsp)
    {
        PELOGE(limLog( pMac, LOGE, "Failed to allocate memory");)
        VOS_ASSERT(pFTPreAuthRsp != NULL);
        return;
    }
    vos_mem_zero( pFTPreAuthRsp, rspLen);

#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
    PELOGE(limLog( pMac, LOG1, "%s: Auth Rsp = %p", __func__, pFTPreAuthRsp);)
#endif

    pFTPreAuthRsp->messageType = eWNI_SME_FT_PRE_AUTH_RSP;
    pFTPreAuthRsp->length = (tANI_U16) rspLen;
    pFTPreAuthRsp->status = status;
    if (psessionEntry)
        pFTPreAuthRsp->smeSessionId = psessionEntry->smeSessionId;

    // The bssid of the AP we are sending Auth1 to.
    if (pMac->ft.ftPEContext.pFTPreAuthReq)
        sirCopyMacAddr(pFTPreAuthRsp->preAuthbssId,
            pMac->ft.ftPEContext.pFTPreAuthReq->preAuthbssId);

    // Attach the auth response now back to SME
    /* Only copy when the response fits the fixed-size ft_ies buffer. */
    pFTPreAuthRsp->ft_ies_length = 0;
    if ((auth_rsp != NULL) && (auth_rsp_length < MAX_FTIE_SIZE))
    {
        // Only 11r assoc has FT IEs.
        vos_mem_copy(pFTPreAuthRsp->ft_ies, auth_rsp, auth_rsp_length);
        pFTPreAuthRsp->ft_ies_length = auth_rsp_length;
    }

#ifdef WLAN_FEATURE_VOWIFI_11R
    if ((psessionEntry) && (psessionEntry->is11Rconnection))
    {
        /* TODO: RIC SUPPORT Fill in the Block Ack RIC IEs in the preAuthRsp */
        /*
        sirStatus = limFTFillRICBlockAckInfo(pMac, pFTPreAuthRsp->ric_ies,
            (tANI_U32 *)&pFTPreAuthRsp->ric_ies_length);
        if (eSIR_SUCCESS != sirStatus)
        {
            PELOGE(limLog(pMac, LOGE, FL("Fill RIC BA Info failed with status %d"), sirStatus);)
        }
        */
    }
#endif

    mmhMsg.type = pFTPreAuthRsp->messageType;
    mmhMsg.bodyptr = pFTPreAuthRsp;
    mmhMsg.bodyval = 0;

#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
    PELOGE(limLog( pMac, LOG1, "Posted Auth Rsp to SME with status of 0x%x", status);)
#endif
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    if (status == eSIR_SUCCESS)
        limDiagEventReport(pMac, WLAN_PE_DIAG_PREAUTH_DONE, psessionEntry,
            status, 0);
#endif
    limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
}
/*------------------------------------------------------------------
*
* Send the FT Pre Auth Response to SME when ever we have a status
* ready to be sent to SME
*
* SME will be the one to send it up to the supplicant to receive
* FTIEs which will be required for Reassoc Req.
*
*------------------------------------------------------------------*/
/*
 * limHandleFTPreAuthRsp() - record the FT pre-auth result and prepare
 * for reassociation.
 *
 * Saves the pre-auth status and (when it fits the save buffer) the auth
 * response frame into the FT PE context.  On success, creates the PE
 * session that will carry the re-association and records the reassoc
 * BSSID.  Finally either switches back to the home channel -- posting
 * the result from the channel-change callback -- or processes the
 * result immediately if already on it.
 */
void limHandleFTPreAuthRsp(tpAniSirGlobal pMac, tSirRetStatus status,
    tANI_U8 *auth_rsp, tANI_U16 auth_rsp_length,
    tpPESession psessionEntry)
{
    tpPESession pftSessionEntry;
    tANI_U8 sessionId;
    tpSirBssDescription pbssDescription;

#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    limDiagEventReport(pMac, WLAN_PE_DIAG_PRE_AUTH_RSP_EVENT, psessionEntry, (tANI_U16)status, 0);
#endif

    // Save the status of pre-auth
    pMac->ft.ftPEContext.ftPreAuthStatus = status;

    // Save the auth rsp, so we can send it to
    // SME once we resume link.
    /* Only copy when the frame fits the fixed-size save buffer. */
    pMac->ft.ftPEContext.saved_auth_rsp_length = 0;
    if ((auth_rsp != NULL) && (auth_rsp_length < MAX_FTIE_SIZE))
    {
        vos_mem_copy(pMac->ft.ftPEContext.saved_auth_rsp,
            auth_rsp, auth_rsp_length);
        pMac->ft.ftPEContext.saved_auth_rsp_length = auth_rsp_length;
    }

    /* Create FT session for the re-association at this point */
    if (pMac->ft.ftPEContext.ftPreAuthStatus == eSIR_SUCCESS)
    {
        /* NOTE(review): assumes pFTPreAuthReq and its pbssDescription are
         * non-NULL on the success path -- confirm against callers. */
        pbssDescription = pMac->ft.ftPEContext.pFTPreAuthReq->pbssDescription;
        if((pftSessionEntry = peCreateSession(pMac, pbssDescription->bssId,
            &sessionId, pMac->lim.maxStation)) == NULL)
        {
            limLog(pMac, LOGE, FL("Session Can not be created for pre-auth 11R AP"));
            return;
        }
        pftSessionEntry->peSessionId = sessionId;
        sirCopyMacAddr(pftSessionEntry->selfMacAddr, psessionEntry->selfMacAddr);
        sirCopyMacAddr(pftSessionEntry->limReAssocbssId, pbssDescription->bssId);
        pftSessionEntry->bssType = psessionEntry->bssType;

        if (pftSessionEntry->bssType == eSIR_INFRASTRUCTURE_MODE)
        {
            pftSessionEntry->limSystemRole = eLIM_STA_ROLE;
        }
        else if(pftSessionEntry->bssType == eSIR_BTAMP_AP_MODE)
        {
            pftSessionEntry->limSystemRole = eLIM_BT_AMP_STA_ROLE;
        }
        else
        {
            /* Session is still kept on this path; only the role stays unset. */
            limLog(pMac, LOGE, FL("Invalid bss type"));
        }
        pftSessionEntry->limPrevSmeState = pftSessionEntry->limSmeState;
        pftSessionEntry->limSmeState = eLIM_SME_WT_REASSOC_STATE;
        pMac->ft.ftPEContext.pftSessionEntry = pftSessionEntry;
        PELOGE(limLog(pMac, LOG1,"%s:created session (%p) with id = %d",
            __func__, pftSessionEntry, pftSessionEntry->peSessionId);)

        /* Update the ReAssoc BSSID of the current session */
        sirCopyMacAddr(psessionEntry->limReAssocbssId, pbssDescription->bssId);
        limPrintMacAddr(pMac, psessionEntry->limReAssocbssId, LOG1);
    }

    if (psessionEntry->currentOperChannel !=
        pMac->ft.ftPEContext.pFTPreAuthReq->preAuthchannelNum)
    {
        // Need to move to the original AP channel
        limChangeChannelWithCallback(pMac, psessionEntry->currentOperChannel,
            limPerformPostFTPreAuthAndChannelChange, NULL, psessionEntry);
    }
    else
    {
#ifdef WLAN_FEATURE_VOWIFI_11R_DEBUG
        PELOGE(limLog( pMac, LOG1, "Pre auth on same channel as connected AP channel %d",
            pMac->ft.ftPEContext.pFTPreAuthReq->preAuthchannelNum);)
#endif
        limFTProcessPreAuthResult(pMac, status, (tANI_U32 *)psessionEntry);
    }
}
/*------------------------------------------------------------------
*
* This function handles the 11R Reassoc Req from SME
*
*------------------------------------------------------------------*/
/*
 * limProcessMlmFTReassocReq() - handle the 11R FT reassociation request
 * from SME.
 *
 * Fills a tLimMlmReassocReq from CFG (reassoc failure timeout,
 * capability info, and the listen interval -- WNI_CFG_TELE_BCN_MAX_LI
 * when telescopic beaconing is enabled), moves the link into pre-assoc
 * state, and posts the previously prepared ADD_BSS request to HAL.
 * Further message processing is deferred until HAL responds.
 *
 * Fix: removed the unused local 'chanNum', which was assigned from
 * psessionEntry->currentOperChannel but never read.
 */
void limProcessMlmFTReassocReq(tpAniSirGlobal pMac, tANI_U32 *pMsgBuf,
    tpPESession psessionEntry)
{
    tANI_U8 smeSessionId = 0;
    tANI_U16 transactionId = 0;
    tLimMlmReassocReq *pMlmReassocReq;
    tANI_U16 caps;
    tANI_U32 val;
    tSirMsgQ msgQ;
    tSirRetStatus retCode;
    tANI_U32 teleBcnEn = 0;

    limGetSessionInfo(pMac,(tANI_U8*)pMsgBuf, &smeSessionId, &transactionId);
    psessionEntry->smeSessionId = smeSessionId;
    psessionEntry->transactionId = transactionId;

#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    limDiagEventReport(pMac, WLAN_PE_DIAG_REASSOCIATING, psessionEntry, 0, 0);
#endif

    /* The ADD_BSS request must have been prepared during pre-auth. */
    if (NULL == pMac->ft.ftPEContext.pAddBssReq)
    {
        limLog(pMac, LOGE, FL("pAddBssReq is NULL"));
        return;
    }

    pMlmReassocReq = vos_mem_malloc(sizeof(tLimMlmReassocReq));
    if (NULL == pMlmReassocReq)
    {
        // Log error
        limLog(pMac, LOGE, FL("call to AllocateMemory failed for mlmReassocReq"));
        return;
    }

    vos_mem_copy(pMlmReassocReq->peerMacAddr,
        psessionEntry->bssId,
        sizeof(tSirMacAddr));

    if (wlan_cfgGetInt(pMac, WNI_CFG_REASSOCIATION_FAILURE_TIMEOUT,
        (tANI_U32 *) &pMlmReassocReq->reassocFailureTimeout)
        != eSIR_SUCCESS)
    {
        /* Could not get ReassocFailureTimeout value from CFG. Log error. */
        limLog(pMac, LOGE, FL("could not retrieve ReassocFailureTimeout value"));
        vos_mem_free(pMlmReassocReq);
        return;
    }

    if (cfgGetCapabilityInfo(pMac, &caps,psessionEntry) != eSIR_SUCCESS)
    {
        /* Could not get Capabilities value from CFG. Log error. */
        limLog(pMac, LOGE, FL("could not retrieve Capabilities value"));
        vos_mem_free(pMlmReassocReq);
        return;
    }
    pMlmReassocReq->capabilityInfo = caps;

    /* Update PE sessionId*/
    pMlmReassocReq->sessionId = psessionEntry->peSessionId;

    /* If telescopic beaconing is enabled, set listen interval to WNI_CFG_TELE_BCN_MAX_LI */
    if (wlan_cfgGetInt(pMac, WNI_CFG_TELE_BCN_WAKEUP_EN, &teleBcnEn) !=
        eSIR_SUCCESS)
    {
        limLog(pMac, LOGP, FL("Couldn't get WNI_CFG_TELE_BCN_WAKEUP_EN"));
        vos_mem_free(pMlmReassocReq);
        return;
    }

    if (teleBcnEn)
    {
        if (wlan_cfgGetInt(pMac, WNI_CFG_TELE_BCN_MAX_LI, &val) != eSIR_SUCCESS)
        {
            /* Could not get ListenInterval value from CFG. Log error. */
            limLog(pMac, LOGE, FL("could not retrieve ListenInterval"));
            vos_mem_free(pMlmReassocReq);
            return;
        }
    }
    else
    {
        if (wlan_cfgGetInt(pMac, WNI_CFG_LISTEN_INTERVAL, &val) != eSIR_SUCCESS)
        {
            /* Could not get ListenInterval value from CFG. Log error. */
            limLog(pMac, LOGE, FL("could not retrieve ListenInterval"));
            vos_mem_free(pMlmReassocReq);
            return;
        }
    }

    if (limSetLinkState(pMac, eSIR_LINK_PREASSOC_STATE, psessionEntry->bssId,
        psessionEntry->selfMacAddr, NULL, NULL) != eSIR_SUCCESS)
    {
        vos_mem_free(pMlmReassocReq);
        return;
    }

    pMlmReassocReq->listenInterval = (tANI_U16) val;
    /* The session now owns the reassoc request; it is freed elsewhere. */
    psessionEntry->pLimMlmReassocReq = pMlmReassocReq;

    //we need to defer the message until we get the response back from HAL.
    SET_LIM_PROCESS_DEFD_MESGS(pMac, false);

    msgQ.type = SIR_HAL_ADD_BSS_REQ;
    msgQ.reserved = 0;
    msgQ.bodyptr = pMac->ft.ftPEContext.pAddBssReq;
    msgQ.bodyval = 0;

#if defined WLAN_FEATURE_VOWIFI_11R_DEBUG
    limLog( pMac, LOG1, FL( "Sending SIR_HAL_ADD_BSS_REQ..." ));
#endif
    MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type));

    retCode = wdaPostCtrlMsg( pMac, &msgQ );
    if( eSIR_SUCCESS != retCode)
    {
        /* On success HAL owns the buffer; free it only on failure. */
        vos_mem_free(pMac->ft.ftPEContext.pAddBssReq);
        limLog( pMac, LOGE, FL("Posting ADD_BSS_REQ to HAL failed, reason=%X"),
            retCode );
    }

    // Dont need this anymore
    pMac->ft.ftPEContext.pAddBssReq = NULL;
    return;
}
/*------------------------------------------------------------------
*
* This function is called if preauth response is not received from the AP
* within this timeout while FT in progress
*
*------------------------------------------------------------------*/
/*
 * limProcessFTPreauthRspTimeout() - pre-auth response timer expiry.
 *
 * Called when the AP did not answer the FT pre-auth within the timeout;
 * reports the failure so the link is resumed on the home channel.
 */
void limProcessFTPreauthRspTimeout(tpAniSirGlobal pMac)
{
    tpPESession psessionEntry;

    /* Pre-auth failed on timeout: resume the link and get back to the
     * home channel. */
    limLog(pMac, LOG1, FL("FT Pre-Auth Time Out!!!!"));

    psessionEntry = peFindSessionBySessionId(pMac,
                        pMac->lim.limTimers.gLimFTPreAuthRspTimer.sessionId);
    if (psessionEntry == NULL)
    {
        limLog(pMac, LOGE, FL("Session Does not exist for given sessionID"));
        return;
    }

    /* Report the failure; this also moves us back on channel if the
     * pre-auth attempt left us off it. */
    limHandleFTPreAuthRsp(pMac, eSIR_FAILURE, NULL, 0, psessionEntry);
}
/*------------------------------------------------------------------
*
* This function is called to process the update key request from SME
*
*------------------------------------------------------------------*/
/*
 * limProcessFTUpdateKey() - stash key material from SME into the pending
 * ADD BSS request.
 *
 * Copies the key info from the eWNI_SME message into the extSetStaKeyParam
 * of the ADD_BSS request prepared for FT reassociation, and fills the
 * single-TID replay-counter setting from CFG.
 *
 * Always returns TRUE (including on sanity-check failure, matching the
 * original contract).
 *
 * Fix: the "Key valid" debug log passed two arguments for a single %d
 * conversion; the format string now has a conversion for each argument.
 */
tANI_BOOLEAN limProcessFTUpdateKey(tpAniSirGlobal pMac, tANI_U32 *pMsgBuf )
{
    tAddBssParams * pAddBssParams;
    tSirFTUpdateKeyInfo * pKeyInfo;
    tANI_U32 val = 0;

    /* Sanity Check */
    if( pMac == NULL || pMsgBuf == NULL )
    {
        return TRUE;
    }
    if(pMac->ft.ftPEContext.pAddBssReq == NULL)
    {
        limLog( pMac, LOGE,
            FL( "pAddBssReq is NULL" ));
        return TRUE;
    }

    pAddBssParams = pMac->ft.ftPEContext.pAddBssReq;
    pKeyInfo = (tSirFTUpdateKeyInfo *)pMsgBuf;

    /* Store the key information in the ADD BSS parameters */
    pAddBssParams->extSetStaKeyParamValid = 1;
    pAddBssParams->extSetStaKeyParam.encType = pKeyInfo->keyMaterial.edType;
    vos_mem_copy((tANI_U8 *) &pAddBssParams->extSetStaKeyParam.key,
        (tANI_U8 *) &pKeyInfo->keyMaterial.key, sizeof(tSirKeys));

    if(eSIR_SUCCESS != wlan_cfgGetInt(pMac, WNI_CFG_SINGLE_TID_RC, &val))
    {
        limLog( pMac, LOGP, FL( "Unable to read WNI_CFG_SINGLE_TID_RC" ));
    }
    pAddBssParams->extSetStaKeyParam.singleTidRc = val;

    PELOG1(limLog(pMac, LOG1, FL("Key valid %d key len = %d"),
        pAddBssParams->extSetStaKeyParamValid,
        pAddBssParams->extSetStaKeyParam.key[0].keyLength);)
    pAddBssParams->extSetStaKeyParam.staIdx = 0;

    PELOG1(limLog(pMac, LOG1,
        FL("BSSID = "MAC_ADDRESS_STR), MAC_ADDR_ARRAY(pKeyInfo->bssId));)

    /* Dump a 16-byte (e.g. CCMP) key only at debug verbosity. */
    if(pAddBssParams->extSetStaKeyParam.key[0].keyLength == 16)
    {
        PELOG1(limLog(pMac, LOG1,
            FL("BSS key = %02X-%02X-%02X-%02X-%02X-%02X-%02X- "
               "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X"),
            pAddBssParams->extSetStaKeyParam.key[0].key[0],
            pAddBssParams->extSetStaKeyParam.key[0].key[1],
            pAddBssParams->extSetStaKeyParam.key[0].key[2],
            pAddBssParams->extSetStaKeyParam.key[0].key[3],
            pAddBssParams->extSetStaKeyParam.key[0].key[4],
            pAddBssParams->extSetStaKeyParam.key[0].key[5],
            pAddBssParams->extSetStaKeyParam.key[0].key[6],
            pAddBssParams->extSetStaKeyParam.key[0].key[7],
            pAddBssParams->extSetStaKeyParam.key[0].key[8],
            pAddBssParams->extSetStaKeyParam.key[0].key[9],
            pAddBssParams->extSetStaKeyParam.key[0].key[10],
            pAddBssParams->extSetStaKeyParam.key[0].key[11],
            pAddBssParams->extSetStaKeyParam.key[0].key[12],
            pAddBssParams->extSetStaKeyParam.key[0].key[13],
            pAddBssParams->extSetStaKeyParam.key[0].key[14],
            pAddBssParams->extSetStaKeyParam.key[0].key[15]);)
    }

    return TRUE;
}
/*
 * limProcessFTAggrQosReq() - handle the aggregated QoS (ADDTS) request
 * from SME after FT reassociation.
 *
 * For every AC selected in aggrInfo.tspecIdx: updates the UAPSD mask
 * from the TSPEC's PSB/direction bits, marks the AC admitted, pushes
 * refreshed EDCA parameters, records the TSPEC in the LIM table and
 * copies it into the WDA request.  Finally posts WDA_AGGR_QOS_REQ to
 * HAL and defers message processing until WDA_AGGR_QOS_RSP arrives.
 *
 * Returns eSIR_SUCCESS, or a failure code with the allocated request
 * freed.
 */
tSirRetStatus
limProcessFTAggrQosReq(tpAniSirGlobal pMac, tANI_U32 *pMsgBuf )
{
    tSirMsgQ msg;
    tSirAggrQosReq * aggrQosReq = (tSirAggrQosReq *)pMsgBuf;
    tpAggrAddTsParams pAggrAddTsParam;
    tpPESession psessionEntry = NULL;
    tpLimTspecInfo tspecInfo;
    tANI_U8 ac;
    tpDphHashNode pSta;
    tANI_U16 aid;
    tANI_U8 sessionId;
    int i;

    pAggrAddTsParam = vos_mem_malloc(sizeof(tAggrAddTsParams));
    if (NULL == pAggrAddTsParam)
    {
        PELOGE(limLog(pMac, LOGE, FL("AllocateMemory() failed"));)
        return eSIR_MEM_ALLOC_FAILED;
    }

    psessionEntry = peFindSessionByBssid(pMac, aggrQosReq->bssId, &sessionId);
    if (psessionEntry == NULL) {
        PELOGE(limLog(pMac, LOGE, FL("psession Entry Null for sessionId = %d"), aggrQosReq->sessionId);)
        vos_mem_free(pAggrAddTsParam);
        return eSIR_FAILURE;
    }

    pSta = dphLookupHashEntry(pMac, aggrQosReq->bssId, &aid, &psessionEntry->dph.dphHashTable);
    if (pSta == NULL)
    {
        PELOGE(limLog(pMac, LOGE, FL("Station context not found - ignoring AddTsRsp"));)
        vos_mem_free(pAggrAddTsParam);
        return eSIR_FAILURE;
    }

    vos_mem_set((tANI_U8 *)pAggrAddTsParam,
        sizeof(tAggrAddTsParams), 0);
    pAggrAddTsParam->staIdx = psessionEntry->staId;
    // Fill in the sessionId specific to PE
    pAggrAddTsParam->sessionId = sessionId;
    pAggrAddTsParam->tspecIdx = aggrQosReq->aggrInfo.tspecIdx;

    /* tspecIdx is a bitmap: bit i selects the TSPEC for AC i. */
    for( i = 0; i < HAL_QOS_NUM_AC_MAX; i++ )
    {
        if (aggrQosReq->aggrInfo.tspecIdx & (1<<i))
        {
            tSirMacTspecIE *pTspec = &aggrQosReq->aggrInfo.aggrAddTsInfo[i].tspec;
            /* Since AddTS response was successful, check for the PSB flag
             * and directional flag inside the TS Info field.
             * An AC is trigger enabled AC if the PSB subfield is set to 1
             * in the uplink direction.
             * An AC is delivery enabled AC if the PSB subfield is set to 1
             * in the downlink direction.
             * An AC is trigger and delivery enabled AC if the PSB subfield
             * is set to 1 in the bi-direction field.
             */
            if (pTspec->tsinfo.traffic.psb == 1)
            {
                limSetTspecUapsdMask(pMac, &pTspec->tsinfo, SET_UAPSD_MASK);
            }
            else
            {
                limSetTspecUapsdMask(pMac, &pTspec->tsinfo, CLEAR_UAPSD_MASK);
            }
            /* ADDTS success, so AC is now admitted. We shall now use the default
             * EDCA parameters as advertised by AP and send the updated EDCA params
             * to HAL.
             */
            ac = upToAc(pTspec->tsinfo.traffic.userPrio);
            if(pTspec->tsinfo.traffic.direction == SIR_MAC_DIRECTION_UPLINK)
            {
                pMac->lim.gAcAdmitMask[SIR_MAC_DIRECTION_UPLINK] |= (1 << ac);
            }
            else if(pTspec->tsinfo.traffic.direction == SIR_MAC_DIRECTION_DNLINK)
            {
                pMac->lim.gAcAdmitMask[SIR_MAC_DIRECTION_DNLINK] |= (1 << ac);
            }
            else if(pTspec->tsinfo.traffic.direction == SIR_MAC_DIRECTION_BIDIR)
            {
                pMac->lim.gAcAdmitMask[SIR_MAC_DIRECTION_UPLINK] |= (1 << ac);
                pMac->lim.gAcAdmitMask[SIR_MAC_DIRECTION_DNLINK] |= (1 << ac);
            }
            limSetActiveEdcaParams(pMac, psessionEntry->gLimEdcaParams, psessionEntry);
            if (pSta->aniPeer == eANI_BOOLEAN_TRUE)
            {
                limSendEdcaParams(pMac, psessionEntry->gLimEdcaParamsActive, pSta->bssId, eANI_BOOLEAN_TRUE);
            }
            else
            {
                limSendEdcaParams(pMac, psessionEntry->gLimEdcaParamsActive, pSta->bssId, eANI_BOOLEAN_FALSE);
            }
            if(eSIR_SUCCESS != limTspecAdd(pMac, pSta->staAddr, pSta->assocId, pTspec, 0, &tspecInfo))
            {
                PELOGE(limLog(pMac, LOGE, FL("Adding entry in lim Tspec Table failed "));)
                pMac->lim.gLimAddtsSent = false;
                vos_mem_free(pAggrAddTsParam);
                return eSIR_FAILURE; //Error handling. send the response with error status. need to send DelTS to tear down the TSPEC status.
            }
            // Copy the TSPEC paramters
            pAggrAddTsParam->tspec[i] = aggrQosReq->aggrInfo.aggrAddTsInfo[i].tspec;
        }
    }

    msg.type = WDA_AGGR_QOS_REQ;
    msg.bodyptr = pAggrAddTsParam;
    msg.bodyval = 0;

    /* We need to defer any incoming messages until we get a
     * WDA_AGGR_QOS_RSP from HAL.
     */
    SET_LIM_PROCESS_DEFD_MESGS(pMac, false);
    MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msg.type));

    if(eSIR_SUCCESS != wdaPostCtrlMsg(pMac, &msg))
    {
        /* Post failed: re-enable deferred processing and free our buffer. */
        PELOGW(limLog(pMac, LOGW, FL("wdaPostCtrlMsg() failed"));)
        SET_LIM_PROCESS_DEFD_MESGS(pMac, true);
        vos_mem_free(pAggrAddTsParam);
        return eSIR_FAILURE;
    }

    return eSIR_SUCCESS;
}
/*
 * limFTSendAggrQosRsp() - report the aggregated QoS result to SME.
 *
 * Builds an eWNI_SME_FT_AGGR_QOS_RSP carrying, for each AC selected in
 * the response's tspecIdx bitmap, the per-AC status and TSPEC, and hands
 * it to limSendSmeAggrQosRsp().  Does nothing when no response was
 * requested.
 */
void
limFTSendAggrQosRsp(tpAniSirGlobal pMac, tANI_U8 rspReqd,
    tpAggrAddTsParams aggrQosRsp, tANI_U8 smesessionId)
{
    tpSirAggrQosRsp rsp;
    int acIdx;

    if (!rspReqd)
        return;

    rsp = vos_mem_malloc(sizeof(tSirAggrQosRsp));
    if (rsp == NULL)
    {
        limLog(pMac, LOGP, FL("AllocateMemory failed for tSirAggrQosRsp"));
        return;
    }
    vos_mem_set((tANI_U8 *) rsp, sizeof(*rsp), 0);

    rsp->messageType = eWNI_SME_FT_AGGR_QOS_RSP;
    rsp->sessionId = smesessionId;
    rsp->length = sizeof(*rsp);
    rsp->aggrInfo.tspecIdx = aggrQosRsp->tspecIdx;

    /* Copy status + TSPEC only for the ACs present in the bitmap. */
    for (acIdx = 0; acIdx < SIR_QOS_NUM_AC_MAX; acIdx++)
    {
        if (aggrQosRsp->tspecIdx & (1 << acIdx))
        {
            rsp->aggrInfo.aggrRsp[acIdx].status = aggrQosRsp->status[acIdx];
            rsp->aggrInfo.aggrRsp[acIdx].tspec = aggrQosRsp->tspec[acIdx];
        }
    }

    limSendSmeAggrQosRsp(pMac, rsp, smesessionId);
}
/*
 * limProcessFTAggrQoSRsp() - handle WDA_AGGR_QOS_RSP from HAL.
 *
 * Re-enables deferred message processing, sends a DELTS action frame
 * (and tears down admit-control state) for every AC whose aggregated
 * ADDTS failed, and forwards the aggregate result to SME.  The message
 * body is freed here on all paths after the NULL check.
 *
 * Cleanup: removed the redundant 'pAggrQosRspMsg != NULL' guards before
 * vos_mem_free() -- the pointer is already known non-NULL at those
 * points -- and a dead commented-out declaration.
 */
void limProcessFTAggrQoSRsp(tpAniSirGlobal pMac, tpSirMsgQ limMsg)
{
    tpAggrAddTsParams pAggrQosRspMsg = NULL;
    tAddTsParams addTsParam = {0};
    tpDphHashNode pSta = NULL;
    tANI_U16 assocId = 0;
    tSirMacAddr peerMacAddr;
    tANI_U8 rspReqd = 1;
    tpPESession psessionEntry = NULL;
    int i = 0;

    PELOG1(limLog(pMac, LOG1, FL(" Received AGGR_QOS_RSP from HAL"));)

    /* Need to process all the deferred messages enqueued since sending the
       SIR_HAL_AGGR_ADD_TS_REQ */
    SET_LIM_PROCESS_DEFD_MESGS(pMac, true);

    pAggrQosRspMsg = (tpAggrAddTsParams) (limMsg->bodyptr);
    if (NULL == pAggrQosRspMsg)
    {
        PELOGE(limLog(pMac, LOGE, FL("NULL pAggrQosRspMsg"));)
        return;
    }

    psessionEntry = peFindSessionBySessionId(pMac, pAggrQosRspMsg->sessionId);
    if (NULL == psessionEntry)
    {
        // Cant find session entry
        PELOGE(limLog(pMac, LOGE, FL("Cant find session entry for %s"), __func__);)
        vos_mem_free(pAggrQosRspMsg);
        return;
    }

    /* For every requested AC that failed, tear the TSPEC back down. */
    for( i = 0; i < HAL_QOS_NUM_AC_MAX; i++ )
    {
        if((((1 << i) & pAggrQosRspMsg->tspecIdx)) &&
            (pAggrQosRspMsg->status[i] != eHAL_STATUS_SUCCESS))
        {
            /* send DELTS to the station */
            sirCopyMacAddr(peerMacAddr, psessionEntry->bssId);
            addTsParam.staIdx = pAggrQosRspMsg->staIdx;
            addTsParam.sessionId = pAggrQosRspMsg->sessionId;
            addTsParam.tspec = pAggrQosRspMsg->tspec[i];
            addTsParam.tspecIdx = pAggrQosRspMsg->tspecIdx;
            limSendDeltsReqActionFrame(pMac, peerMacAddr, rspReqd,
                &addTsParam.tspec.tsinfo,
                &addTsParam.tspec, psessionEntry);
            pSta = dphLookupAssocId(pMac, addTsParam.staIdx, &assocId,
                &psessionEntry->dph.dphHashTable);
            if (pSta != NULL)
            {
                limAdmitControlDeleteTS(pMac, assocId, &addTsParam.tspec.tsinfo,
                    NULL, (tANI_U8 *)&addTsParam.tspecIdx);
            }
        }
    }

    /* Send the Aggr QoS response to SME */
    limFTSendAggrQosRsp(pMac, rspReqd, pAggrQosRspMsg,
        psessionEntry->smeSessionId);

    vos_mem_free(pAggrQosRspMsg);
}
#endif /* WLAN_FEATURE_VOWIFI_11R */
| gpl-2.0 |
AnesHadzi/linux-socfpga | drivers/gpio/gpio-timberdale.c | 64 | 8660 | /*
* Timberdale FPGA GPIO driver
* Copyright (c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
* Timberdale FPGA GPIO
*/
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/timb_gpio.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#define DRIVER_NAME "timb-gpio"
#define TGPIOVAL 0x00
#define TGPIODIR 0x04
#define TGPIO_IER 0x08
#define TGPIO_ISR 0x0c
#define TGPIO_IPR 0x10
#define TGPIO_ICR 0x14
#define TGPIO_FLR 0x18
#define TGPIO_LVR 0x1c
#define TGPIO_VER 0x20
#define TGPIO_BFLR 0x24
struct timbgpio {
	void __iomem *membase;	/* mapped TGPIO register block */
	spinlock_t lock;	/* mutual exclusion for register read-modify-write */
	struct gpio_chip gpio;
	int irq_base;		/* first virtual IRQ; <= 0 means no IRQ support */
	unsigned long last_ier;	/* cached IER contents, restored after IRQ dispatch */
};
/*
 * timbgpio_update_bit() - read-modify-write one bit of a TGPIO register.
 *
 * Sets (when @enabled) or clears bit @index of the register at @offset,
 * under the chip's spinlock.  Always returns 0.
 */
static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
			       unsigned offset, bool enabled)
{
	struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
	u32 mask = 1 << index;
	u32 val;

	spin_lock(&tgpio->lock);

	val = ioread32(tgpio->membase + offset);
	if (enabled)
		val |= mask;
	else
		val &= ~mask;
	iowrite32(val, tgpio->membase + offset);

	spin_unlock(&tgpio->lock);

	return 0;
}
/* gpio_chip .direction_input: make pin @nr an input by setting its
 * TGPIODIR bit. */
static int timbgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
{
	return timbgpio_update_bit(gpio, nr, TGPIODIR, true);
}
/* gpio_chip .get: return the current level (0 or 1) of pin @nr from the
 * TGPIOVAL register. */
static int timbgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
{
	struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);

	return (ioread32(tgpio->membase + TGPIOVAL) >> nr) & 1;
}
static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
unsigned nr, int val)
{
return timbgpio_update_bit(gpio, nr, TGPIODIR, false);
}
/* gpio_chip .set: drive pin @nr to level @val via the TGPIOVAL register. */
static void timbgpio_gpio_set(struct gpio_chip *gpio,
				unsigned nr, int val)
{
	timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
}
/* gpio_chip .to_irq: map pin @offset to its virtual IRQ number, or
 * -EINVAL when this instance has no IRQ support (irq_base <= 0). */
static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
{
	struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);

	if (tgpio->irq_base <= 0)
		return -EINVAL;

	return tgpio->irq_base + offset;
}
/*
* GPIO IRQ
*/
/* irq_chip .irq_disable: mask one pin's interrupt by clearing its bit in
 * the cached IER copy and writing the copy back to the hardware. */
static void timbgpio_irq_disable(struct irq_data *d)
{
	struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
	unsigned long mask = 1UL << (d->irq - tgpio->irq_base);
	unsigned long flags;

	spin_lock_irqsave(&tgpio->lock, flags);
	tgpio->last_ier &= ~mask;
	iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
	spin_unlock_irqrestore(&tgpio->lock, flags);
}
/* irq_chip .irq_enable: unmask one pin's interrupt by setting its bit in
 * the cached IER copy and writing the copy back to the hardware. */
static void timbgpio_irq_enable(struct irq_data *d)
{
	struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
	unsigned long mask = 1UL << (d->irq - tgpio->irq_base);
	unsigned long flags;

	spin_lock_irqsave(&tgpio->lock, flags);
	tgpio->last_ier |= mask;
	iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
	spin_unlock_irqrestore(&tgpio->lock, flags);
}
/*
 * irq_chip .irq_set_type: program the trigger type for one GPIO IRQ.
 *
 * Configures the level (LVR), flank/edge (FLR) and -- on IP version 3
 * and later -- both-edge (BFLR) registers for the pin behind @d, then
 * acks any interrupt latched during reconfiguration.  Both-edge
 * triggering on hardware older than version 3 fails with -EINVAL.
 *
 * Fix: the bounds check used '>' so offset == ngpio (one past the last
 * valid pin, which are 0..ngpio-1) was accepted; it is now '>='.
 */
static int timbgpio_irq_type(struct irq_data *d, unsigned trigger)
{
	struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
	int offset = d->irq - tgpio->irq_base;
	unsigned long flags;
	u32 lvr, flr, bflr = 0;
	u32 ver;
	int ret = 0;

	if (offset < 0 || offset >= tgpio->gpio.ngpio)
		return -EINVAL;

	ver = ioread32(tgpio->membase + TGPIO_VER);

	spin_lock_irqsave(&tgpio->lock, flags);

	lvr = ioread32(tgpio->membase + TGPIO_LVR);
	flr = ioread32(tgpio->membase + TGPIO_FLR);
	if (ver > 2)
		bflr = ioread32(tgpio->membase + TGPIO_BFLR);

	if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
		bflr &= ~(1 << offset);
		flr &= ~(1 << offset);
		if (trigger & IRQ_TYPE_LEVEL_HIGH)
			lvr |= 1 << offset;
		else
			lvr &= ~(1 << offset);
	}

	/* NOTE(review): for pure level triggers the else-branch below runs
	 * and re-sets the FLR bit just cleared above -- confirm against the
	 * FPGA documentation whether level types are handled correctly. */
	if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
		if (ver < 3) {
			ret = -EINVAL;
			goto out;
		} else {
			flr |= 1 << offset;
			bflr |= 1 << offset;
		}
	} else {
		bflr &= ~(1 << offset);
		flr |= 1 << offset;
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			lvr &= ~(1 << offset);
		else
			lvr |= 1 << offset;
	}

	iowrite32(lvr, tgpio->membase + TGPIO_LVR);
	iowrite32(flr, tgpio->membase + TGPIO_FLR);
	if (ver > 2)
		iowrite32(bflr, tgpio->membase + TGPIO_BFLR);

	/* Ack anything latched while the trigger was being reprogrammed. */
	iowrite32(1 << offset, tgpio->membase + TGPIO_ICR);

out:
	spin_unlock_irqrestore(&tgpio->lock, flags);
	return ret;
}
/*
 * timbgpio_irq() - chained handler for the parent interrupt.
 *
 * Acks the parent, latches and clears the pending-pin bits (IPR/ICR),
 * then dispatches each set bit to its per-pin virtual IRQ.  IER is
 * forced to 0 around the dispatch and restored from the cached last_ier
 * afterwards because of the hardware quirk noted below.
 */
static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
{
	struct timbgpio *tgpio = irq_desc_get_handler_data(desc);
	struct irq_data *data = irq_desc_get_irq_data(desc);
	unsigned long ipr;
	int offset;

	data->chip->irq_ack(data);

	ipr = ioread32(tgpio->membase + TGPIO_IPR);
	iowrite32(ipr, tgpio->membase + TGPIO_ICR);

	/*
	 * Some versions of the hardware trash the IER register if more than
	 * one interrupt is received simultaneously.
	 */
	iowrite32(0, tgpio->membase + TGPIO_IER);

	for_each_set_bit(offset, &ipr, tgpio->gpio.ngpio)
		generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));

	iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
}
/* irq_chip used for the per-pin virtual IRQs.  No .irq_ack is provided:
 * pending bits are cleared centrally in timbgpio_irq() via TGPIO_ICR. */
static struct irq_chip timbgpio_irqchip = {
	.name = "GPIO",
	.irq_enable = timbgpio_irq_enable,
	.irq_disable = timbgpio_irq_disable,
	.irq_set_type = timbgpio_irq_type,
};
/*
 * timbgpio_probe() - set up one Timberdale GPIO block.
 *
 * Maps the register window from platform resources (all allocations are
 * devm-managed), registers a gpio_chip for pdata->nr_pins pins and, when
 * both a platform IRQ and pdata->irq_base are provided, wires up one
 * virtual IRQ per pin behind a chained handler on the parent interrupt.
 */
static int timbgpio_probe(struct platform_device *pdev)
{
	int err, i;
	struct device *dev = &pdev->dev;
	struct gpio_chip *gc;
	struct timbgpio *tgpio;
	struct resource *iomem;
	struct timbgpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int irq = platform_get_irq(pdev, 0);

	/* Registers are accessed as 32-bit words, so at most 32 pins. */
	if (!pdata || pdata->nr_pins > 32) {
		dev_err(dev, "Invalid platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem) {
		dev_err(dev, "Unable to get resource\n");
		return -EINVAL;
	}

	tgpio = devm_kzalloc(dev, sizeof(struct timbgpio), GFP_KERNEL);
	if (!tgpio) {
		dev_err(dev, "Memory alloc failed\n");
		return -EINVAL;
	}
	tgpio->irq_base = pdata->irq_base;

	spin_lock_init(&tgpio->lock);

	if (!devm_request_mem_region(dev, iomem->start, resource_size(iomem),
		DRIVER_NAME)) {
		dev_err(dev, "Region already claimed\n");
		return -EBUSY;
	}

	tgpio->membase = devm_ioremap(dev, iomem->start, resource_size(iomem));
	if (!tgpio->membase) {
		dev_err(dev, "Cannot ioremap\n");
		return -ENOMEM;
	}

	gc = &tgpio->gpio;

	gc->label = dev_name(&pdev->dev);
	gc->owner = THIS_MODULE;
	gc->dev = &pdev->dev;
	gc->direction_input = timbgpio_gpio_direction_input;
	gc->get = timbgpio_gpio_get;
	gc->direction_output = timbgpio_gpio_direction_output;
	gc->set = timbgpio_gpio_set;
	/* Expose to_irq only when IRQ support is actually wired up. */
	gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL;
	gc->dbg_show = NULL;
	gc->base = pdata->gpio_base;
	gc->ngpio = pdata->nr_pins;
	gc->can_sleep = false;

	err = gpiochip_add(gc);
	if (err)
		return err;

	platform_set_drvdata(pdev, tgpio);

	/* make sure to disable interrupts */
	iowrite32(0x0, tgpio->membase + TGPIO_IER);

	/* Without a parent IRQ or a virq base, run GPIO-only. */
	if (irq < 0 || tgpio->irq_base <= 0)
		return 0;

	for (i = 0; i < pdata->nr_pins; i++) {
		irq_set_chip_and_handler(tgpio->irq_base + i,
			&timbgpio_irqchip, handle_simple_irq);
		irq_set_chip_data(tgpio->irq_base + i, tgpio);
		irq_clear_status_flags(tgpio->irq_base + i, IRQ_NOREQUEST | IRQ_NOPROBE);
	}

	irq_set_chained_handler_and_data(irq, timbgpio_irq, tgpio);

	return 0;
}
/*
 * timbgpio_remove() - tear down IRQ mappings and unregister the chip.
 *
 * Memory region and ioremap are devm-managed and released by the core.
 */
static int timbgpio_remove(struct platform_device *pdev)
{
	struct timbgpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct timbgpio *tgpio = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	if (irq >= 0 && tgpio->irq_base > 0) {
		int i;
		/* Detach the per-pin irq_chips before removing the chained
		 * handler from the parent interrupt. */
		for (i = 0; i < pdata->nr_pins; i++) {
			irq_set_chip(tgpio->irq_base + i, NULL);
			irq_set_chip_data(tgpio->irq_base + i, NULL);
		}

		irq_set_handler(irq, NULL);
		irq_set_handler_data(irq, NULL);
	}

	gpiochip_remove(&tgpio->gpio);

	return 0;
}
/* Platform driver glue; matched against devices named DRIVER_NAME
 * ("timb-gpio"). */
static struct platform_driver timbgpio_platform_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
	.probe = timbgpio_probe,
	.remove = timbgpio_remove,
};
/*--------------------------------------------------------------------------*/
module_platform_driver(timbgpio_platform_driver);
MODULE_DESCRIPTION("Timberdale GPIO driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mocean Laboratories");
MODULE_ALIAS("platform:"DRIVER_NAME);
| gpl-2.0 |
getitnowmarketing/LG-2.6.32 | drivers/scsi/NCR_Q720.c | 832 | 9360 | /* -*- mode: c; c-basic-offset: 8 -*- */
/* NCR Quad 720 MCA SCSI Driver
*
* Copyright (C) 2003 by James.Bottomley@HansenPartnership.com
*/
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mca.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/io.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "ncr53c8xx.h"
#include "NCR_Q720.h"
/* Template chip description handed to ncr_attach(); NCR_Q720_probe_one()
 * copies it and overwrites revision_id with the value probed from the
 * hardware. */
static struct ncr_chip q720_chip __initdata = {
	.revision_id = 0x0f,
	.burst_max = 3,
	.offset_max = 8,
	.nr_divisor = 4,
	.features = FE_WIDE | FE_DIFF | FE_VARCLK,
};
MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("NCR Quad 720 SCSI Driver");
MODULE_LICENSE("GPL");
#define NCR_Q720_VERSION "0.9"
/* We need this helper because there are up to four hosts per struct device */
struct NCR_Q720_private {
	struct device *dev;		/* device the SCSI hosts are registered against */
	void __iomem * mem_base;	/* ioremapped card memory window */
	__u32 phys_mem_base;		/* physical base of that window */
	__u32 mem_size;			/* size of the mapped window */
	__u8 irq;			/* interrupt line shared by all siops */
	__u8 siops;			/* number of siops; loop bound in NCR_Q720_intr() */
	__u8 irq_enable;		/* bitmask of siops with interrupts enabled */
	struct Scsi_Host *hosts[4];	/* one host per siop */
};
/* scsi_host_template shared by every host this driver attaches. */
static struct scsi_host_template NCR_Q720_tpnt = {
	.module = THIS_MODULE,
	.proc_name = "NCR_Q720",
};
/*
 * NCR_Q720_intr() - shared interrupt handler for all siops on the card.
 *
 * The top nibble of the register at offset 0x0d carries one status bit
 * per siop; the handler treats a 0 bit as "pending" (it scans with
 * ffz()).  Bits for siops that were never enabled are forced to 1 via
 * ~irq_enable so ffz() skips them, and an all-ones value means the
 * interrupt was not ours.
 */
static irqreturn_t
NCR_Q720_intr(int irq, void *data)
{
	struct NCR_Q720_private *p = (struct NCR_Q720_private *)data;
	__u8 sir = (readb(p->mem_base + 0x0d) & 0xf0) >> 4;
	__u8 siop;

	sir |= ~p->irq_enable;

	if(sir == 0xff)
		return IRQ_NONE;

	while((siop = ffz(sir)) < p->siops) {
		sir |= 1<<siop;	/* mark this siop as handled */
		ncr53c8xx_intr(irq, p->hosts[siop]);
	}

	return IRQ_HANDLED;
}
/*
 * NCR_Q720_probe_one() - reset and attach a single siop on the card.
 *
 * Reads the SCSI id and differential setting from the siop's SCSR
 * registers, resets the siop, probes its chip revision, then attaches
 * it as its own SCSI host, enables its bit in p->irq_enable and
 * registers/scans the host.  Returns 0 on success, negative otherwise.
 *
 * NOTE(review): the @slot parameter is unused in this function.
 */
static int __init
NCR_Q720_probe_one(struct NCR_Q720_private *p, int siop,
		int irq, int slot, __u32 paddr, void __iomem *vaddr)
{
	struct ncr_device device;
	__u8 scsi_id;
	static int unit = 0;	/* global unit counter across all cards/siops */
	__u8 scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
	__u8 differential = readb(vaddr + NCR_Q720_SCSR_OFFSET) & 0x20;
	__u8 version;
	int error;

	scsi_id = scsr1 >> 4;
	/* enable burst length 16 (FIXME: should allow this) */
	scsr1 |= 0x02;
	/* force a siop reset */
	scsr1 |= 0x04;
	writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);
	udelay(10);	/* let the reset settle before reading the version */
	version = readb(vaddr + 0x18) >> 4;

	memset(&device, 0, sizeof(struct ncr_device));
	/* Initialise ncr_device structure with items required by ncr_attach. */
	device.chip = q720_chip;
	device.chip.revision_id = version;
	device.host_id = scsi_id;
	device.dev = p->dev;
	device.slot.base = paddr;
	device.slot.base_c = paddr;
	device.slot.base_v = vaddr;
	device.slot.irq = irq;
	device.differential = differential ? 2 : 0;
	printk("Q720 probe unit %d (siop%d) at 0x%lx, diff = %d, vers = %d\n", unit, siop,
		(unsigned long)paddr, differential, version);

	p->hosts[siop] = ncr_attach(&NCR_Q720_tpnt, unit++, &device);

	if (!p->hosts[siop])
		goto fail;

	/* Shared handler will now dispatch this siop's interrupts. */
	p->irq_enable |= (1<<siop);
	scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
	/* clear the disable interrupt bit */
	scsr1 &= ~0x01;
	writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);

	error = scsi_add_host(p->hosts[siop], p->dev);
	if (error)
		ncr53c8xx_release(p->hosts[siop]);
	else
		scsi_scan_host(p->hosts[siop]);
	return error;

 fail:
	return -ENODEV;
}
/* Detect a Q720 card. Note: because of the setup --- the chips are
 * essentially connected to the MCA bus independently, it is easier
 * to set them up as two separate host adapters, rather than one
 * adapter with two channels */
/*
 * NCR_Q720_probe - detect an NCR Q720 MCA card and bring up its SIOPs
 * @dev: generic device for the MCA slot being probed
 *
 * Bring-up is staged:
 *   1. enable the board via POS register 2 and derive its I/O window;
 *   2. through the I/O window, locate and enable the on-board SRAM,
 *      which also memory-maps the board registers;
 *   3. declare the SRAM as coherent DMA memory, hook the shared irq
 *      and probe each SIOP in turn.
 *
 * Returns 0 if at least one SIOP attached, -ENOMEM or -ENODEV on
 * failure.  All acquired resources (irq, DMA declaration, memory
 * region, private data) are released on every failure path.
 */
static int __init
NCR_Q720_probe(struct device *dev)
{
	struct NCR_Q720_private *p;
	static int banner = 1;
	struct mca_device *mca_dev = to_mca_device(dev);
	int slot = mca_dev->slot;
	int found = 0;
	int irq, i, siops;
	__u8 pos2, pos4, asr2, asr9, asr10;
	__u16 io_base;
	__u32 base_addr, mem_size;
	void __iomem *mem_base;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	pos2 = mca_device_read_pos(mca_dev, 2);
	/* enable device */
	pos2 |= NCR_Q720_POS2_BOARD_ENABLE | NCR_Q720_POS2_INTERRUPT_ENABLE;
	mca_device_write_pos(mca_dev, 2, pos2);

	io_base = (pos2 & NCR_Q720_POS2_IO_MASK) << NCR_Q720_POS2_IO_SHIFT;

	if(banner) {
		printk(KERN_NOTICE "NCR Q720: Driver Version " NCR_Q720_VERSION "\n"
		       "NCR Q720: Copyright (c) 2003 by James.Bottomley@HansenPartnership.com\n"
		       "NCR Q720:\n");
		banner = 0;
	}
	io_base = mca_device_transform_ioport(mca_dev, io_base);

	/* OK, this is phase one of the bootstrap, we now know the
	 * I/O space base address.  All the configuration registers
	 * are mapped here (including pos) */

	/* sanity check I/O mapping */
	i = inb(io_base) | (inb(io_base+1)<<8);
	if(i != NCR_Q720_MCA_ID) {
		printk(KERN_ERR "NCR_Q720, adapter failed to I/O map registers correctly at 0x%x(0x%x)\n", io_base, i);
		kfree(p);
		return -ENODEV;
	}

	/* Phase II, find the ram base and memory map the board register */
	pos4 = inb(io_base + 4);
	/* enable streaming data */
	pos4 |= 0x01;
	outb(pos4, io_base + 4);
	base_addr = (pos4 & 0x7e) << 20;
	base_addr += (pos4 & 0x80) << 23;
	asr10 = inb(io_base + 0x12);
	base_addr += (asr10 & 0x80) << 24;
	base_addr += (asr10 & 0x70) << 23;

	/* OK, got the base addr, now we need to find the ram size,
	 * enable and map it */
	asr9 = inb(io_base + 0x11);
	i = (asr9 & 0xc0) >> 6;
	if(i == 0)
		mem_size = 1024;
	else
		mem_size = 1 << (19 + i);

	/* enable the sram mapping */
	asr9 |= 0x20;
	/* disable the rom mapping */
	asr9 &= ~0x10;
	outb(asr9, io_base + 0x11);

	if(!request_mem_region(base_addr, mem_size, "NCR_Q720")) {
		/* fixed: the newline used to sit mid-message, splitting
		 * the address range across two log lines */
		printk(KERN_ERR "NCR_Q720: Failed to claim memory region 0x%lx-0x%lx\n",
		       (unsigned long)base_addr,
		       (unsigned long)(base_addr + mem_size));
		goto out_free;
	}

	if (dma_declare_coherent_memory(dev, base_addr, base_addr,
					mem_size, DMA_MEMORY_MAP)
	    != DMA_MEMORY_MAP) {
		printk(KERN_ERR "NCR_Q720: DMA declare memory failed\n");
		goto out_release_region;
	}

	/* The first 1k of the memory buffer is a memory map of the registers
	 */
	mem_base = dma_mark_declared_memory_occupied(dev, base_addr,
						     1024);
	if (IS_ERR(mem_base)) {
		printk("NCR_Q720 failed to reserve memory mapped region\n");
		goto out_release;
	}

	/* now also enable accesses in asr 2 */
	asr2 = inb(io_base + 0x0a);
	asr2 |= 0x01;
	outb(asr2, io_base + 0x0a);

	/* get the number of SIOPs (this should be 2 or 4) */
	siops = ((asr2 & 0xe0) >> 5) + 1;

	/* sanity check mapping (again) */
	i = readw(mem_base);
	if(i != NCR_Q720_MCA_ID) {
		printk(KERN_ERR "NCR_Q720, adapter failed to memory map registers correctly at 0x%lx(0x%x)\n", (unsigned long)base_addr, i);
		goto out_release;
	}

	irq = readb(mem_base + 5) & 0x0f;

	/* now do the bus related transforms */
	irq = mca_device_transform_irq(mca_dev, irq);

	printk(KERN_NOTICE "NCR Q720: found in slot %d irq = %d mem base = 0x%lx siops = %d\n", slot, irq, (unsigned long)base_addr, siops);
	/* %u: mem_size is __u32, %d was a signed/unsigned mismatch */
	printk(KERN_NOTICE "NCR Q720: On board ram %uk\n", mem_size/1024);

	p->dev = dev;
	p->mem_base = mem_base;
	p->phys_mem_base = base_addr;
	p->mem_size = mem_size;
	p->irq = irq;
	p->siops = siops;

	if (request_irq(irq, NCR_Q720_intr, IRQF_SHARED, "NCR_Q720", p)) {
		printk(KERN_ERR "NCR_Q720: request irq %d failed\n", irq);
		goto out_release;
	}

	/* disable all the siop interrupts */
	for(i = 0; i < siops; i++) {
		void __iomem *reg_scsr1 = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
			+ i*NCR_Q720_SIOP_SHIFT + NCR_Q720_SCSR_OFFSET + 1;
		__u8 scsr1 = readb(reg_scsr1);
		scsr1 |= 0x01;
		writeb(scsr1, reg_scsr1);
	}

	/* plumb in all 720 chips */
	for (i = 0; i < siops; i++) {
		void __iomem *siop_v_base = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
			+ i*NCR_Q720_SIOP_SHIFT;
		__u32 siop_p_base = base_addr + NCR_Q720_CHIP_REGISTER_OFFSET
			+ i*NCR_Q720_SIOP_SHIFT;
		__u16 port = io_base + NCR_Q720_CHIP_REGISTER_OFFSET
			+ i*NCR_Q720_SIOP_SHIFT;
		int err;

		outb(0xff, port + 0x40);
		outb(0x07, port + 0x41);
		if ((err = NCR_Q720_probe_one(p, i, irq, slot,
					      siop_p_base, siop_v_base)) != 0)
			printk("Q720: SIOP%d: probe failed, error = %d\n",
			       i, err);
		else
			found++;
	}

	if (!found) {
		/* fixed: this path previously only freed the private data,
		 * leaking the irq, the DMA declaration and the mem region */
		goto out_free_irq;
	}

	mca_device_set_claim(mca_dev, 1);
	mca_device_set_name(mca_dev, "NCR_Q720");
	dev_set_drvdata(dev, p);
	return 0;

 out_free_irq:
	free_irq(irq, p);
 out_release:
	dma_release_declared_memory(dev);
 out_release_region:
	release_mem_region(base_addr, mem_size);
 out_free:
	kfree(p);
	return -ENODEV;
}
/*
 * Detach one SIOP: unregister the Scsi_Host from the mid-layer, then
 * let the ncr53c8xx core release its per-host resources.
 */
static void __exit
NCR_Q720_remove_one(struct Scsi_Host *host)
{
scsi_remove_host(host);
ncr53c8xx_release(host);
}
/*
 * Driver-core remove callback: tear down every attached SIOP, then
 * release the card-wide resources acquired in NCR_Q720_probe()
 * (DMA declaration, memory region, irq, private data).
 */
static int __exit
NCR_Q720_remove(struct device *dev)
{
struct NCR_Q720_private *p = dev_get_drvdata(dev);
int i;
/* hosts[] slots are NULL for SIOPs that never attached */
for (i = 0; i < p->siops; i++)
if(p->hosts[i])
NCR_Q720_remove_one(p->hosts[i]);
dma_release_declared_memory(dev);
release_mem_region(p->phys_mem_base, p->mem_size);
free_irq(p->irq, p);
kfree(p);
return 0;
}
/* MCA adapter ids this driver binds to; the list is zero-terminated. */
static short NCR_Q720_id_table[] = { NCR_Q720_MCA_ID, 0 };
/* MCA bus glue: matches NCR_Q720_id_table and dispatches probe/remove. */
static struct mca_driver NCR_Q720_driver = {
.id_table = NCR_Q720_id_table,
.driver = {
.name = "NCR_Q720",
.bus = &mca_bus_type,
.probe = NCR_Q720_probe,
.remove = __devexit_p(NCR_Q720_remove),
},
};
/*
 * Module entry point: bring up the ncr53c8xx core, then register the
 * MCA driver.  If either step reports failure, tear the core back down
 * so no half-initialised state is left behind (matching the original
 * behaviour, the core's exit hook runs on any non-zero result).
 */
static int __init
NCR_Q720_init(void)
{
	int err;

	err = ncr53c8xx_init();
	if (err == 0)
		err = mca_register_driver(&NCR_Q720_driver);
	if (err != 0)
		ncr53c8xx_exit();

	return err;
}
/* Module exit: unregister from the MCA bus, then shut down the core. */
static void __exit
NCR_Q720_exit(void)
{
mca_unregister_driver(&NCR_Q720_driver);
ncr53c8xx_exit();
}
module_init(NCR_Q720_init);
module_exit(NCR_Q720_exit);
| gpl-2.0 |
namagi/android_kernel_motorola_msm8960-common | arch/x86/kernel/cpu/perf_event_p6.c | 2880 | 3485 | #ifdef CONFIG_CPU_SUP_INTEL
/*
* Not sure about some of these
*/
/*
 * Map of generic perf hardware event ids to P6 event-select encodings
 * (event code in the low byte, unit mask in the next byte).
 */
static const u64 p6_perfmon_event_map[] =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
[PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
[PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
};
/* Raw encoding lookup; callers are expected to pass an index below
 * .max_events (set to ARRAY_SIZE of the map) -- no bounds check here. */
static u64 p6_pmu_event_map(int hw_event)
{
return p6_perfmon_event_map[hw_event];
}
/*
 * Event setting that is specified not to count anything.
 * We use this to effectively disable a counter.
 *
 * L2_RQSTS with 0 MESI unit mask.
 */
#define P6_NOP_EVENT 0x0000002EULL
/* Scheduling constraints: each listed event can only run on the
 * counter(s) given by the bitmask in the second argument. */
static struct event_constraint p6_event_constraints[] =
{
INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
INTEL_EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
EVENT_CONSTRAINT_END
};
/*
 * Globally stop the PMU by clearing the enable bit in EVNTSEL0.
 * On P6 this single bit gates both counters.
 */
static void p6_pmu_disable_all(void)
{
u64 val;
/* p6 only has one enable register */
rdmsrl(MSR_P6_EVNTSEL0, val);
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(MSR_P6_EVNTSEL0, val);
}
static void p6_pmu_enable_all(int added)
{
unsigned long val;
/* p6 only has one enable register */
rdmsrl(MSR_P6_EVNTSEL0, val);
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(MSR_P6_EVNTSEL0, val);
}
/*
 * Stop one event by programming its event-select with the NOP event.
 * The global enable bit is kept set when the PMU is currently on, so
 * the other counter keeps running.
 */
static inline void
p6_pmu_disable_event(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
u64 val = P6_NOP_EVENT;
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
(void)checking_wrmsrl(hwc->config_base, val);
}
/* Program one event's configured event-select; it starts counting
 * immediately if the PMU is globally enabled. */
static void p6_pmu_enable_event(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
u64 val;
val = hwc->config;
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
(void)checking_wrmsrl(hwc->config_base, val);
}
/* x86_pmu backend description for the P6 family: generic x86 handlers
 * plus the P6-specific enable/disable routines and MSR layout above. */
static __initconst const struct x86_pmu p6_pmu = {
.name = "p6",
.handle_irq = x86_pmu_handle_irq,
.disable_all = p6_pmu_disable_all,
.enable_all = p6_pmu_enable_all,
.enable = p6_pmu_enable_event,
.disable = p6_pmu_disable_event,
.hw_config = x86_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_P6_EVNTSEL0,
.perfctr = MSR_P6_PERFCTR0,
.event_map = p6_pmu_event_map,
.max_events = ARRAY_SIZE(p6_perfmon_event_map),
.apic = 1,
.max_period = (1ULL << 31) - 1,
.version = 0,
.num_counters = 2,
/*
 * Events have 40 bits implemented. However they are designed such
 * that bits [32-39] are sign extensions of bit 31. As such the
 * effective width of a event for P6-like PMU is 32 bits only.
 *
 * See IA-32 Intel Architecture Software developer manual Vol 3B
 */
.cntval_bits = 32,
.cntval_mask = (1ULL << 32) - 1,
.get_event_constraints = x86_get_event_constraints,
.event_constraints = p6_event_constraints,
};
/*
 * Install the P6 PMU description if this CPU model is one of the
 * supported P6-family parts; otherwise report it and bail out.
 */
static __init int p6_pmu_init(void)
{
	static const unsigned char supported_models[] = {
		1, 3,		/* Pentium Pro */
		5, 6,		/* Pentium II */
		7, 8, 11,	/* Pentium III */
		9, 13,		/* Pentium M */
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(supported_models); i++)
		if (boot_cpu_data.x86_model == supported_models[i])
			break;

	if (i == ARRAY_SIZE(supported_models)) {
		pr_cont("unsupported p6 CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	x86_pmu = p6_pmu;
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */
| gpl-2.0 |
InfinitiveOS-Devices/kernel_xiaomi_armani | arch/mips/kernel/prom.c | 4416 | 2424 | /*
* MIPS support for CONFIG_OF device tree support
*
* Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/page.h>
#include <asm/prom.h>
/* MIPS hooks for the generic flat-device-tree code.  Memory ranges
 * discovered in the DT are fed into the MIPS boot memory map. */
int __init early_init_dt_scan_memory_arch(unsigned long node,
const char *uname, int depth,
void *data)
{
return early_init_dt_scan_memory(node, uname, depth, data);
}
/* Register a DT-described memory range as ordinary bootable RAM. */
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
return add_memory_region(base, size, BOOT_MEM_RAM);
}
/* Reserve a physical range in the bootmem allocator (e.g. the DT blob
 * itself, see device_tree_init()). */
int __init reserve_mem_mach(unsigned long addr, unsigned long size)
{
return reserve_bootmem(addr, size, BOOTMEM_DEFAULT);
}
/* Return a previously reserved range to the bootmem allocator. */
void __init free_mem_mach(unsigned long addr, unsigned long size)
{
return free_bootmem(addr, size);
}
/* Early allocator used while unflattening the device tree; memory is
 * taken from below MAX_DMA_ADDRESS. */
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
}
#ifdef CONFIG_BLK_DEV_INITRD
/* Record the initrd window found in the device tree.  The DT supplies
 * physical addresses, so convert to kernel virtual before use. */
void __init early_init_dt_setup_initrd_arch(unsigned long start,
unsigned long end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
/* the initrd may legitimately sit below the kernel image */
initrd_below_start_ok = 1;
}
#endif
/*
 * First-stage device tree parsing: record the blob location and scan
 * the /chosen, root and memory nodes.  Called very early in boot with
 * @params pointing at the flattened device tree.
 */
void __init early_init_devtree(void *params)
{
/* Setup flat device-tree pointer */
initial_boot_params = params;
/* Retrieve various informations from the /chosen node of the
 * device-tree, including the platform type, initrd location and
 * size, and more ...
 */
of_scan_flat_dt(early_init_dt_scan_chosen, arcs_cmdline);
/* Scan memory nodes */
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
}
/*
 * Second-stage DT setup: unflatten the blob into the live device tree.
 * The flat blob is pinned in bootmem only for the duration of
 * unflatten_device_tree() and released afterwards.
 */
void __init device_tree_init(void)
{
unsigned long base, size;
if (!initial_boot_params)
return;
base = virt_to_phys((void *)initial_boot_params);
/* total blob length is stored big-endian in the FDT header */
size = be32_to_cpu(initial_boot_params->totalsize);
/* Before we do anything, lets reserve the dt blob */
reserve_mem_mach(base, size);
unflatten_device_tree();
/* free the space reserved for the dt blob */
free_mem_mach(base, size);
}
| gpl-2.0 |
eagleeyetom/android_kernel_oppo_msm8974 | arch/xtensa/kernel/time.c | 4672 | 2663 | /*
* arch/xtensa/kernel/time.c
*
* Timer and clock support.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <asm/timex.h>
#include <asm/platform.h>
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
unsigned long ccount_per_jiffy; /* per 1/HZ */
unsigned long nsec_per_ccount; /* nsec per ccount increment */
#endif
/* Clocksource read hook: the xtensa CCOUNT cycle counter. */
static cycle_t ccount_read(void)
{
return (cycle_t)get_ccount();
}
/* 32-bit free-running CCOUNT registered as the system clocksource. */
static struct clocksource ccount_clocksource = {
.name = "ccount",
.rating = 200,
.read = ccount_read,
.mask = CLOCKSOURCE_MASK(32),
};
static irqreturn_t timer_interrupt(int irq, void *dev_id);
/* Periodic tick handler wired to LINUX_TIMER_INT in time_init(). */
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED,
.name = "timer",
};
/*
 * Boot-time timer setup: optionally calibrate the CPU clock, register
 * the CCOUNT clocksource, install the tick interrupt and arm the first
 * CCOMPARE match one jiffy from now.
 */
void __init time_init(void)
{
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
printk("Calibrating CPU frequency ");
platform_calibrate_ccount();
printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
(int)(ccount_per_jiffy/(10000/HZ))%100);
#endif
clocksource_register_hz(&ccount_clocksource, CCOUNT_PER_JIFFY * HZ);
/* Initialize the linux timer interrupt. */
setup_irq(LINUX_TIMER_INT, &timer_irqaction);
set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
}
/*
* The timer interrupt is called HZ times per second.
*/
/*
 * Tick handler: account every jiffy that has elapsed since the last
 * programmed CCOMPARE value, re-arming the comparator one jiffy at a
 * time.  The signed difference get_ccount() - next is overflow-safe
 * for the 32-bit counter.
 */
irqreturn_t timer_interrupt (int irq, void *dev_id)
{
unsigned long next;
next = get_linux_timer();
again:
while ((signed long)(get_ccount() - next) > 0) {
profile_tick(CPU_PROFILING);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
xtime_update(1); /* Linux handler in kernel/time/timekeeping */
/* Note that writing CCOMPARE clears the interrupt. */
next += CCOUNT_PER_JIFFY;
set_linux_timer(next);
}
/* Allow platform to do something useful (Wdog). */
platform_heartbeat();
/* Make sure we didn't miss any tick... */
if ((signed long)(get_ccount() - next) > 0)
goto again;
return IRQ_HANDLED;
}
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
/* No measurement needed: udelay can be derived directly from the CPU
 * clock, so preset loops_per_jiffy instead of running the boot-time
 * calibration loop. */
void __cpuinit calibrate_delay(void)
{
loops_per_jiffy = CCOUNT_PER_JIFFY;
printk("Calibrating delay loop (skipped)... "
"%lu.%02lu BogoMIPS preset\n",
loops_per_jiffy/(1000000/HZ),
(loops_per_jiffy/(10000/HZ)) % 100);
}
#endif
| gpl-2.0 |
digsig-ng/linux-digsig | arch/avr32/kernel/cpu.c | 7232 | 10329 | /*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/param.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <asm/setup.h>
#include <asm/sysreg.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#ifdef CONFIG_PERFORMANCE_COUNTERS
/*
* XXX: If/when a SMP-capable implementation of AVR32 will ever be
* made, we must make sure that the code executes on the correct CPU.
*/
/* sysfs read: event selector of performance counter 0
 * (PCCR bits 17:12). */
static ssize_t show_pc0event(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long pccr;
pccr = sysreg_read(PCCR);
return sprintf(buf, "0x%lx\n", (pccr >> 12) & 0x3f);
}
/* sysfs write: set counter 0's event selector; values above 0x3f are
 * rejected with -EINVAL. */
static ssize_t store_pc0event(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long val;
char *endp;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf || val > 0x3f)
return -EINVAL;
/* merge new selector into PCCR, keeping all other fields */
val = (val << 12) | (sysreg_read(PCCR) & 0xfffc0fff);
sysreg_write(PCCR, val);
return count;
}
/* sysfs read: current value of performance counter 0 (PCNT0). */
static ssize_t show_pc0count(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long pcnt0;
pcnt0 = sysreg_read(PCNT0);
return sprintf(buf, "%lu\n", pcnt0);
}
/* sysfs write: load an arbitrary value into counter 0. */
static ssize_t store_pc0count(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long val;
char *endp;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
sysreg_write(PCNT0, val);
return count;
}
/* sysfs read: event selector of performance counter 1
 * (PCCR bits 23:18). */
static ssize_t show_pc1event(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long pccr;
pccr = sysreg_read(PCCR);
return sprintf(buf, "0x%lx\n", (pccr >> 18) & 0x3f);
}
/* sysfs write: set counter 1's event selector; values above 0x3f are
 * rejected with -EINVAL. */
static ssize_t store_pc1event(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long val;
char *endp;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf || val > 0x3f)
return -EINVAL;
/* merge new selector into PCCR, keeping all other fields */
val = (val << 18) | (sysreg_read(PCCR) & 0xff03ffff);
sysreg_write(PCCR, val);
return count;
}
/* sysfs read: current value of performance counter 1 (PCNT1). */
static ssize_t show_pc1count(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long pcnt1;
pcnt1 = sysreg_read(PCNT1);
return sprintf(buf, "%lu\n", pcnt1);
}
/* sysfs write: load an arbitrary value into counter 1. */
static ssize_t store_pc1count(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long val;
char *endp;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
sysreg_write(PCNT1, val);
return count;
}
/* sysfs read: the free-running cycle counter (PCCNT). */
static ssize_t show_pccycles(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long pccnt;
pccnt = sysreg_read(PCCNT);
return sprintf(buf, "%lu\n", pccnt);
}
/* sysfs write: load an arbitrary value into the cycle counter. */
static ssize_t store_pccycles(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long val;
char *endp;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
sysreg_write(PCCNT, val);
return count;
}
/* sysfs read: global performance-counter enable (PCCR bit 0),
 * printed as '0' or '1'. */
static ssize_t show_pcenable(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long pccr;
pccr = sysreg_read(PCCR);
return sprintf(buf, "%c\n", (pccr & 1)?'1':'0');
}
/* sysfs write: any non-zero value enables the counters, zero disables
 * them; only PCCR bit 0 is modified. */
static ssize_t store_pcenable(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
unsigned long pccr, val;
char *endp;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
/* normalise to a single bit */
if (val)
val = 1;
pccr = sysreg_read(PCCR);
pccr = (pccr & ~1UL) | val;
sysreg_write(PCCR, pccr);
return count;
}
static DEVICE_ATTR(pc0event, 0600, show_pc0event, store_pc0event);
static DEVICE_ATTR(pc0count, 0600, show_pc0count, store_pc0count);
static DEVICE_ATTR(pc1event, 0600, show_pc1event, store_pc1event);
static DEVICE_ATTR(pc1count, 0600, show_pc1count, store_pc1count);
static DEVICE_ATTR(pccycles, 0600, show_pccycles, store_pccycles);
static DEVICE_ATTR(pcenable, 0600, show_pcenable, store_pcenable);
#endif /* CONFIG_PERFORMANCE_COUNTERS */
/*
 * Register every possible CPU with the driver core and, when
 * performance counters are configured, expose their sysfs controls.
 * NOTE(review): device_create_file() return values are ignored; a
 * failure would silently leave some attributes missing.
 */
static int __init topology_init(void)
{
int cpu;
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
register_cpu(c, cpu);
#ifdef CONFIG_PERFORMANCE_COUNTERS
device_create_file(&c->dev, &dev_attr_pc0event);
device_create_file(&c->dev, &dev_attr_pc0count);
device_create_file(&c->dev, &dev_attr_pc1event);
device_create_file(&c->dev, &dev_attr_pc1count);
device_create_file(&c->dev, &dev_attr_pccycles);
device_create_file(&c->dev, &dev_attr_pcenable);
#endif
}
return 0;
}
/* Maps a (manufacturer id, product number) pair to a chip name. */
struct chip_id_map {
u16 mid;
u16 pn;
const char *name;
};
static const struct chip_id_map chip_names[] = {
{ .mid = 0x1f, .pn = 0x1e82, .name = "AT32AP700x" },
};
#define NR_CHIP_NAMES ARRAY_SIZE(chip_names)
/* Indexed by the cpu id field read from CONFIG0. */
static const char *cpu_names[] = {
"Morgan",
"AP7",
};
#define NR_CPU_NAMES ARRAY_SIZE(cpu_names)
/* Indexed by the architecture id field read from CONFIG0. */
static const char *arch_names[] = {
"AVR32A",
"AVR32B",
};
#define NR_ARCH_NAMES ARRAY_SIZE(arch_names)
/* Indexed by the MMU-type field read from CONFIG0. */
static const char *mmu_types[] = {
"No MMU",
"ITLB and DTLB",
"Shared TLB",
"MPU"
};
/* Bit order matches the AVR32_FEATURE_* flags built in
 * setup_processor(). */
static const char *cpu_feature_flags[] = {
"rmw", "dsp", "simd", "ocd", "perfctr", "java", "fpu",
};
static const char *get_chip_name(struct avr32_cpuinfo *cpu)
{
unsigned int i;
unsigned int mid = avr32_get_manufacturer_id(cpu);
unsigned int pn = avr32_get_product_number(cpu);
for (i = 0; i < NR_CHIP_NAMES; i++) {
if (chip_names[i].mid == mid && chip_names[i].pn == pn)
return chip_names[i].name;
}
return "(unknown)";
}
/*
 * Decode the CONFIG0/CONFIG1 system registers into boot_cpu_data:
 * cpu/arch ids and revisions, MMU type, cache geometry and the
 * feature-flag word, then print a summary.  Returns early (after
 * filling the raw ids) if the cpu or arch id is outside the known
 * name tables.
 */
void __init setup_processor(void)
{
unsigned long config0, config1;
unsigned long features;
unsigned cpu_id, cpu_rev, arch_id, arch_rev, mmu_type;
unsigned device_id;
unsigned tmp;
unsigned i;
config0 = sysreg_read(CONFIG0);
config1 = sysreg_read(CONFIG1);
cpu_id = SYSREG_BFEXT(PROCESSORID, config0);
cpu_rev = SYSREG_BFEXT(PROCESSORREVISION, config0);
arch_id = SYSREG_BFEXT(AT, config0);
arch_rev = SYSREG_BFEXT(AR, config0);
mmu_type = SYSREG_BFEXT(MMUT, config0);
/* the device id comes from the on-chip debug unit, not CONFIG0 */
device_id = ocd_read(DID);
boot_cpu_data.arch_type = arch_id;
boot_cpu_data.cpu_type = cpu_id;
boot_cpu_data.arch_revision = arch_rev;
boot_cpu_data.cpu_revision = cpu_rev;
boot_cpu_data.tlb_config = mmu_type;
boot_cpu_data.device_id = device_id;
/* a zero line-size field means the cache is absent */
tmp = SYSREG_BFEXT(ILSZ, config1);
if (tmp) {
boot_cpu_data.icache.ways = 1 << SYSREG_BFEXT(IASS, config1);
boot_cpu_data.icache.sets = 1 << SYSREG_BFEXT(ISET, config1);
boot_cpu_data.icache.linesz = 1 << (tmp + 1);
}
tmp = SYSREG_BFEXT(DLSZ, config1);
if (tmp) {
boot_cpu_data.dcache.ways = 1 << SYSREG_BFEXT(DASS, config1);
boot_cpu_data.dcache.sets = 1 << SYSREG_BFEXT(DSET, config1);
boot_cpu_data.dcache.linesz = 1 << (tmp + 1);
}
if ((cpu_id >= NR_CPU_NAMES) || (arch_id >= NR_ARCH_NAMES)) {
printk ("Unknown CPU configuration (ID %02x, arch %02x), "
"continuing anyway...\n",
cpu_id, arch_id);
return;
}
printk ("CPU: %s chip revision %c\n", get_chip_name(&boot_cpu_data),
avr32_get_chip_revision(&boot_cpu_data) + 'A');
printk ("CPU: %s [%02x] core revision %d (%s arch revision %d)\n",
cpu_names[cpu_id], cpu_id, cpu_rev,
arch_names[arch_id], arch_rev);
printk ("CPU: MMU configuration: %s\n", mmu_types[mmu_type]);
printk ("CPU: features:");
/* translate single-bit CONFIG0 flags into AVR32_FEATURE_* bits */
features = 0;
if (config0 & SYSREG_BIT(CONFIG0_R))
features |= AVR32_FEATURE_RMW;
if (config0 & SYSREG_BIT(CONFIG0_D))
features |= AVR32_FEATURE_DSP;
if (config0 & SYSREG_BIT(CONFIG0_S))
features |= AVR32_FEATURE_SIMD;
if (config0 & SYSREG_BIT(CONFIG0_O))
features |= AVR32_FEATURE_OCD;
if (config0 & SYSREG_BIT(CONFIG0_P))
features |= AVR32_FEATURE_PCTR;
if (config0 & SYSREG_BIT(CONFIG0_J))
features |= AVR32_FEATURE_JAVA;
if (config0 & SYSREG_BIT(CONFIG0_F))
features |= AVR32_FEATURE_FPU;
for (i = 0; i < ARRAY_SIZE(cpu_feature_flags); i++)
if (features & (1 << i))
printk(" %s", cpu_feature_flags[i]);
printk("\n");
boot_cpu_data.features = features;
}
#ifdef CONFIG_PROC_FS
/*
 * /proc/cpuinfo show routine: format the boot CPU's identification,
 * clock frequency, cache geometry, feature flags and BogoMIPS from
 * boot_cpu_data.
 */
static int c_show(struct seq_file *m, void *v)
{
unsigned int icache_size, dcache_size;
unsigned int cpu = smp_processor_id();
unsigned int freq;
unsigned int i;
/* total size = ways * sets * line size */
icache_size = boot_cpu_data.icache.ways *
boot_cpu_data.icache.sets *
boot_cpu_data.icache.linesz;
dcache_size = boot_cpu_data.dcache.ways *
boot_cpu_data.dcache.sets *
boot_cpu_data.dcache.linesz;
seq_printf(m, "processor\t: %d\n", cpu);
seq_printf(m, "chip type\t: %s revision %c\n",
get_chip_name(&boot_cpu_data),
avr32_get_chip_revision(&boot_cpu_data) + 'A');
if (boot_cpu_data.arch_type < NR_ARCH_NAMES)
seq_printf(m, "cpu arch\t: %s revision %d\n",
arch_names[boot_cpu_data.arch_type],
boot_cpu_data.arch_revision);
if (boot_cpu_data.cpu_type < NR_CPU_NAMES)
seq_printf(m, "cpu core\t: %s revision %d\n",
cpu_names[boot_cpu_data.cpu_type],
boot_cpu_data.cpu_revision);
/* +500 rounds Hz to the nearest kHz before formatting as MHz */
freq = (clk_get_rate(boot_cpu_data.clk) + 500) / 1000;
seq_printf(m, "cpu MHz\t\t: %u.%03u\n", freq / 1000, freq % 1000);
seq_printf(m, "i-cache\t\t: %dK (%u ways x %u sets x %u)\n",
icache_size >> 10,
boot_cpu_data.icache.ways,
boot_cpu_data.icache.sets,
boot_cpu_data.icache.linesz);
seq_printf(m, "d-cache\t\t: %dK (%u ways x %u sets x %u)\n",
dcache_size >> 10,
boot_cpu_data.dcache.ways,
boot_cpu_data.dcache.sets,
boot_cpu_data.dcache.linesz);
seq_printf(m, "features\t:");
for (i = 0; i < ARRAY_SIZE(cpu_feature_flags); i++)
if (boot_cpu_data.features & (1 << i))
seq_printf(m, " %s", cpu_feature_flags[i]);
seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
boot_cpu_data.loops_per_jiffy / (500000/HZ),
(boot_cpu_data.loops_per_jiffy / (5000/HZ)) % 100);
return 0;
}
/* seq_file iterator over a single record (the boot CPU): c_start
 * yields a non-NULL token only at position 0 and c_next always ends
 * the iteration. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < 1 ? (void *)1 : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}
/* Operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = c_show
};
| gpl-2.0 |
ccotter/linux-deterministic | net/mac80211/rc80211_pid_algo.c | 7488 | 14952 | /*
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005, Devicescape Software, Inc.
* Copyright 2007, Mattias Nissler <mattias.nissler@gmx.de>
* Copyright 2007-2008, Stefano Brivio <stefano.brivio@polimi.it>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "rate.h"
#include "mesh.h"
#include "rc80211_pid.h"
/* This is an implementation of a TX rate control algorithm that uses a PID
* controller. Given a target failed frames rate, the controller decides about
* TX rate changes to meet the target failed frames rate.
*
* The controller basically computes the following:
*
* adj = CP * err + CI * err_avg + CD * (err - last_err) * (1 + sharpening)
*
* where
* adj adjustment value that is used to switch TX rate (see below)
* err current error: target vs. current failed frames percentage
* last_err last error
* err_avg average (i.e. poor man's integral) of recent errors
* sharpening non-zero when fast response is needed (i.e. right after
* association or no frames sent for a long time), heading
* to zero over time
* CP Proportional coefficient
* CI Integral coefficient
* CD Derivative coefficient
*
* CP, CI, CD are subject to careful tuning.
*
* The integral component uses a exponential moving average approach instead of
* an actual sliding window. The advantage is that we don't need to keep an
* array of the last N error values and computation is easier.
*
* Once we have the adj value, we map it to a rate by means of a learning
* algorithm. This algorithm keeps the state of the percentage failed frames
* difference between rates. The behaviour of the lowest available rate is kept
* as a reference value, and every time we switch between two rates, we compute
* the difference between the failed frames each rate exhibited. By doing so,
* we compare behaviours which different rates exhibited in adjacent timeslices,
* thus the comparison is minimally affected by external conditions. This
* difference gets propagated to the whole set of measurements, so that the
* reference is always the same. Periodically, we normalize this set so that
* recent events weigh the most. By comparing the adj value with this set, we
* avoid pejorative switches to lower rates and allow for switches to higher
* rates if they behaved well.
*
* Note that for the computations we use a fixed-point representation to avoid
* floating point arithmetic. Hence, all values are shifted left by
* RC_PID_ARITH_SHIFT.
*/
/* Adjust the rate while ensuring that we won't switch to a lower rate if it
* exhibited a worse failed frames behaviour and we'll choose the highest rate
* whose failed frames behaviour is not worse than the one of the original rate
* target. While at it, check that the new rate is valid. */
/*
 * Apply the PID controller output @adj (a signed step in the sorted
 * rate ordering) to the station's current TX rate.  The per-rate
 * 'diff' history in @rinfo is consulted so that a decrease is skipped
 * if the lower rate performed worse, and an increase may jump further
 * when higher rates performed at least as well.  The chosen rate is
 * then snapped to the nearest rate the station actually supports.
 */
static void rate_control_pid_adjust_rate(struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta,
struct rc_pid_sta_info *spinfo, int adj,
struct rc_pid_rateinfo *rinfo)
{
int cur_sorted, new_sorted, probe, tmp, n_bitrates, band;
int cur = spinfo->txrate_idx;
band = sband->band;
n_bitrates = sband->n_bitrates;
/* Map passed arguments to sorted values. */
cur_sorted = rinfo[cur].rev_index;
new_sorted = cur_sorted + adj;
/* Check limits. */
if (new_sorted < 0)
new_sorted = rinfo[0].rev_index;
else if (new_sorted >= n_bitrates)
new_sorted = rinfo[n_bitrates - 1].rev_index;
tmp = new_sorted;
if (adj < 0) {
/* Ensure that the rate decrease isn't disadvantageous. */
for (probe = cur_sorted; probe >= new_sorted; probe--)
if (rinfo[probe].diff <= rinfo[cur_sorted].diff &&
rate_supported(sta, band, rinfo[probe].index))
tmp = probe;
} else {
/* Look for rate increase with zero (or below) cost. */
for (probe = new_sorted + 1; probe < n_bitrates; probe++)
if (rinfo[probe].diff <= rinfo[new_sorted].diff &&
rate_supported(sta, band, rinfo[probe].index))
tmp = probe;
}
/* Fit the rate found to the nearest supported rate. */
do {
if (rate_supported(sta, band, rinfo[tmp].index)) {
spinfo->txrate_idx = rinfo[tmp].index;
break;
}
if (adj < 0)
tmp--;
else
tmp++;
} while (tmp < n_bitrates && tmp >= 0);
#ifdef CONFIG_MAC80211_DEBUGFS
rate_control_pid_event_rate_change(&spinfo->events,
spinfo->txrate_idx,
sband->bitrates[spinfo->txrate_idx].bitrate);
#endif
}
/* Normalize the failed frames per-rate differences. */
static void rate_control_pid_normalize(struct rc_pid_info *pinfo, int l)
{
int i, norm_offset = pinfo->norm_offset;
struct rc_pid_rateinfo *r = pinfo->rinfo;
if (r[0].diff > norm_offset)
r[0].diff -= norm_offset;
else if (r[0].diff < -norm_offset)
r[0].diff += norm_offset;
for (i = 0; i < l - 1; i++)
if (r[i + 1].diff > r[i].diff + norm_offset)
r[i + 1].diff -= norm_offset;
else if (r[i + 1].diff <= r[i].diff)
r[i + 1].diff += norm_offset;
}
/*
 * One PID controller step, run once per sampling period: compute the
 * current failed-frames percentage, update the per-rate 'diff'
 * learning state, derive the proportional / integral / derivative
 * error terms and, if the resulting adjustment is non-zero, switch
 * the TX rate via rate_control_pid_adjust_rate().
 */
static void rate_control_pid_sample(struct rc_pid_info *pinfo,
struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta,
struct rc_pid_sta_info *spinfo)
{
struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
u32 pf;
s32 err_avg;
u32 err_prop;
u32 err_int;
u32 err_der;
int adj, i, j, tmp;
unsigned long period;
/* In case nothing happened during the previous control interval, turn
 * the sharpening factor on. */
period = msecs_to_jiffies(pinfo->sampling_period);
if (jiffies - spinfo->last_sample > 2 * period)
spinfo->sharp_cnt = pinfo->sharpen_duration;
spinfo->last_sample = jiffies;
/* This should never happen, but in case, we assume the old sample is
 * still a good measurement and copy it. */
if (unlikely(spinfo->tx_num_xmit == 0))
pf = spinfo->last_pf;
else
pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit;
spinfo->tx_num_xmit = 0;
spinfo->tx_num_failed = 0;
/* If we just switched rate, update the rate behaviour info. */
if (pinfo->oldrate != spinfo->txrate_idx) {
i = rinfo[pinfo->oldrate].rev_index;
j = rinfo[spinfo->txrate_idx].rev_index;
/* propagate the old/new failure delta into the learning set */
tmp = (pf - spinfo->last_pf);
tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT);
rinfo[j].diff = rinfo[i].diff + tmp;
pinfo->oldrate = spinfo->txrate_idx;
}
rate_control_pid_normalize(pinfo, sband->n_bitrates);
/* Compute the proportional, integral and derivative errors. */
err_prop = (pinfo->target - pf) << RC_PID_ARITH_SHIFT;
/* exponential moving average stands in for a true integral */
err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift;
spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop;
err_int = spinfo->err_avg_sc >> pinfo->smoothing_shift;
err_der = (pf - spinfo->last_pf) *
(1 + pinfo->sharpen_factor * spinfo->sharp_cnt);
spinfo->last_pf = pf;
if (spinfo->sharp_cnt)
spinfo->sharp_cnt--;
#ifdef CONFIG_MAC80211_DEBUGFS
rate_control_pid_event_pf_sample(&spinfo->events, pf, err_prop, err_int,
err_der);
#endif
/* Compute the controller output. */
adj = (err_prop * pinfo->coeff_p + err_int * pinfo->coeff_i
+ err_der * pinfo->coeff_d);
adj = RC_PID_DO_ARITH_RIGHT_SHIFT(adj, 2 * RC_PID_ARITH_SHIFT);
/* Change rate. */
if (adj)
rate_control_pid_adjust_rate(sband, sta, spinfo, adj, rinfo);
}
/*
 * TX-status callback: accumulate transmit/failure statistics for the
 * advised rate and, once a sampling period has elapsed, run the PID
 * controller step.
 */
static void rate_control_pid_tx_status(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb)
{
struct rc_pid_info *pinfo = priv;
struct rc_pid_sta_info *spinfo = priv_sta;
unsigned long period;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (!spinfo)
return;
/* Ignore all frames that were sent with a different rate than the rate
 * we currently advise mac80211 to use. */
if (info->status.rates[0].idx != spinfo->txrate_idx)
return;
spinfo->tx_num_xmit++;
#ifdef CONFIG_MAC80211_DEBUGFS
rate_control_pid_event_tx_status(&spinfo->events, info);
#endif
/* We count frames that totally failed to be transmitted as two bad
 * frames, those that made it out but had some retries as one good and
 * one bad frame. */
if (!(info->flags & IEEE80211_TX_STAT_ACK)) {
spinfo->tx_num_failed += 2;
spinfo->tx_num_xmit++;
} else if (info->status.rates[0].count > 1) {
spinfo->tx_num_failed++;
spinfo->tx_num_xmit++;
}
/* Update PID controller state. */
period = msecs_to_jiffies(pinfo->sampling_period);
if (time_after(jiffies, spinfo->last_sample + period))
rate_control_pid_sample(pinfo, sband, sta, spinfo);
}
/*
 * mac80211 get_rate hook: fill in the tx rate (and retry count) for a
 * frame about to be transmitted, using the rate index the PID controller
 * currently advises for this station.
 */
static void
rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta,
			  void *priv_sta,
			  struct ieee80211_tx_rate_control *txrc)
{
	struct sk_buff *skb = txrc->skb;
	struct ieee80211_supported_band *sband = txrc->sband;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rc_pid_sta_info *spinfo = priv_sta;
	int rateidx;

	/* Pick the retry budget depending on whether RTS protection is on. */
	if (txrc->rts)
		info->control.rates[0].count =
			txrc->hw->conf.long_frame_max_tx_count;
	else
		info->control.rates[0].count =
			txrc->hw->conf.short_frame_max_tx_count;

	/* Send management frames and NO_ACK data using lowest rate. */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	rateidx = spinfo->txrate_idx;

	/* Clamp to the current band in case the advised index is stale. */
	if (rateidx >= sband->n_bitrates)
		rateidx = sband->n_bitrates - 1;

	info->control.rates[0].idx = rateidx;

#ifdef CONFIG_MAC80211_DEBUGFS
	rate_control_pid_event_tx_rate(&spinfo->events,
		rateidx, sband->bitrates[rateidx].bitrate);
#endif
}
/*
 * mac80211 rate_init hook: build the sorted rate table (index and reverse
 * index per bitrate) for this band and start the station at the lowest
 * supported rate.
 */
static void
rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
			   struct ieee80211_sta *sta, void *priv_sta)
{
	struct rc_pid_sta_info *spinfo = priv_sta;
	struct rc_pid_info *pinfo = priv;
	struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
	int i, j, tmp;
	bool s;

	/* TODO: This routine should consider using RSSI from previous packets
	 * as we need to have IEEE 802.1X auth succeed immediately after assoc..
	 * Until that method is implemented, we will use the lowest supported
	 * rate as a workaround. */

	/* Sort the rates. This is optimized for the most common case (i.e.
	 * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
	 * mapping too. */
	for (i = 0; i < sband->n_bitrates; i++) {
		rinfo[i].index = i;
		rinfo[i].rev_index = i;
		/* Seed each rate's behaviour offset (0 for fast start). */
		if (RC_PID_FAST_START)
			rinfo[i].diff = 0;
		else
			rinfo[i].diff = i * pinfo->norm_offset;
	}
	for (i = 1; i < sband->n_bitrates; i++) {
		s = false;
		for (j = 0; j < sband->n_bitrates - i; j++)
			if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
				     sband->bitrates[rinfo[j + 1].index].bitrate)) {
				/* Swap adjacent entries and fix both reverse
				 * mappings to keep index/rev_index coherent. */
				tmp = rinfo[j].index;
				rinfo[j].index = rinfo[j + 1].index;
				rinfo[j + 1].index = tmp;
				rinfo[rinfo[j].index].rev_index = j;
				rinfo[rinfo[j + 1].index].rev_index = j + 1;
				s = true;
			}
		if (!s)
			break;	/* pass made no swaps: already sorted */
	}

	spinfo->txrate_idx = rate_lowest_index(sband, sta);
}
/*
 * Allocate the interface-wide PID rate-control state and its per-rate
 * behaviour table, sized after the largest supported band, and create the
 * debugfs tuning knobs when enabled.
 *
 * Returns the rc_pid_info pointer, or NULL on allocation failure or when
 * no band provides any bitrate.
 */
static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
				    struct dentry *debugfsdir)
{
	struct rc_pid_info *pinfo;
	struct rc_pid_rateinfo *rinfo;
	struct ieee80211_supported_band *sband;
	int i, max_rates = 0;
#ifdef CONFIG_MAC80211_DEBUGFS
	struct rc_pid_debugfs_entries *de;
#endif

	pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
	if (!pinfo)
		return NULL;

	/* Size the rate table after the band with the most bitrates. */
	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
		sband = hw->wiphy->bands[i];
		if (sband && sband->n_bitrates > max_rates)
			max_rates = sband->n_bitrates;
	}

	/*
	 * If no band contributes a single bitrate, kmalloc(0) below would
	 * return ZERO_SIZE_PTR and any later rinfo[] access (rate_init)
	 * would be an out-of-bounds write.  Fail the allocation instead.
	 */
	if (!max_rates) {
		kfree(pinfo);
		return NULL;
	}

	rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC);
	if (!rinfo) {
		kfree(pinfo);
		return NULL;
	}

	/* Controller defaults; tunable via debugfs below. */
	pinfo->target = RC_PID_TARGET_PF;
	pinfo->sampling_period = RC_PID_INTERVAL;
	pinfo->coeff_p = RC_PID_COEFF_P;
	pinfo->coeff_i = RC_PID_COEFF_I;
	pinfo->coeff_d = RC_PID_COEFF_D;
	pinfo->smoothing_shift = RC_PID_SMOOTHING_SHIFT;
	pinfo->sharpen_factor = RC_PID_SHARPENING_FACTOR;
	pinfo->sharpen_duration = RC_PID_SHARPENING_DURATION;
	pinfo->norm_offset = RC_PID_NORM_OFFSET;
	pinfo->rinfo = rinfo;
	pinfo->oldrate = 0;

#ifdef CONFIG_MAC80211_DEBUGFS
	de = &pinfo->dentries;
	de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR,
					debugfsdir, &pinfo->target);
	de->sampling_period = debugfs_create_u32("sampling_period",
						 S_IRUSR | S_IWUSR, debugfsdir,
						 &pinfo->sampling_period);
	de->coeff_p = debugfs_create_u32("coeff_p", S_IRUSR | S_IWUSR,
					 debugfsdir, (u32 *)&pinfo->coeff_p);
	de->coeff_i = debugfs_create_u32("coeff_i", S_IRUSR | S_IWUSR,
					 debugfsdir, (u32 *)&pinfo->coeff_i);
	de->coeff_d = debugfs_create_u32("coeff_d", S_IRUSR | S_IWUSR,
					 debugfsdir, (u32 *)&pinfo->coeff_d);
	de->smoothing_shift = debugfs_create_u32("smoothing_shift",
						 S_IRUSR | S_IWUSR, debugfsdir,
						 &pinfo->smoothing_shift);
	de->sharpen_factor = debugfs_create_u32("sharpen_factor",
						S_IRUSR | S_IWUSR, debugfsdir,
						&pinfo->sharpen_factor);
	de->sharpen_duration = debugfs_create_u32("sharpen_duration",
						  S_IRUSR | S_IWUSR, debugfsdir,
						  &pinfo->sharpen_duration);
	de->norm_offset = debugfs_create_u32("norm_offset",
					     S_IRUSR | S_IWUSR, debugfsdir,
					     &pinfo->norm_offset);
#endif

	return pinfo;
}
/*
 * Tear down the interface-wide PID state: remove the debugfs knobs
 * (in reverse order of creation) and release the rate table and the
 * info structure itself.
 */
static void rate_control_pid_free(void *priv)
{
	struct rc_pid_info *pinfo = priv;
#ifdef CONFIG_MAC80211_DEBUGFS
	struct rc_pid_debugfs_entries *entries = &pinfo->dentries;

	debugfs_remove(entries->norm_offset);
	debugfs_remove(entries->sharpen_duration);
	debugfs_remove(entries->sharpen_factor);
	debugfs_remove(entries->smoothing_shift);
	debugfs_remove(entries->coeff_d);
	debugfs_remove(entries->coeff_i);
	debugfs_remove(entries->coeff_p);
	debugfs_remove(entries->sampling_period);
	debugfs_remove(entries->target);
#endif

	kfree(pinfo->rinfo);
	kfree(pinfo);
}
/*
 * Allocate zeroed per-station PID state and start its sampling clock.
 * Returns NULL on allocation failure.
 */
static void *rate_control_pid_alloc_sta(void *priv, struct ieee80211_sta *sta,
					gfp_t gfp)
{
	struct rc_pid_sta_info *info = kzalloc(sizeof(*info), gfp);

	if (!info)
		return NULL;

	/* Begin the sampling period from "now". */
	info->last_sample = jiffies;

#ifdef CONFIG_MAC80211_DEBUGFS
	spin_lock_init(&info->events.lock);
	init_waitqueue_head(&info->events.waitqueue);
#endif
	return info;
}
/* Release the per-station PID state allocated by rate_control_pid_alloc_sta. */
static void rate_control_pid_free_sta(void *priv, struct ieee80211_sta *sta,
				      void *priv_sta)
{
	kfree(priv_sta);
}
/* Rate-control algorithm registration: "pid" operations for mac80211. */
static struct rate_control_ops mac80211_rcpid = {
	.name = "pid",
	.tx_status = rate_control_pid_tx_status,
	.get_rate = rate_control_pid_get_rate,
	.rate_init = rate_control_pid_rate_init,
	.alloc = rate_control_pid_alloc,
	.free = rate_control_pid_free,
	.alloc_sta = rate_control_pid_alloc_sta,
	.free_sta = rate_control_pid_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = rate_control_pid_add_sta_debugfs,
	.remove_sta_debugfs = rate_control_pid_remove_sta_debugfs,
#endif
};
/* Register the PID rate-control algorithm with mac80211 at init time. */
int __init rc80211_pid_init(void)
{
	return ieee80211_rate_control_register(&mac80211_rcpid);
}
/* Unregister the PID rate-control algorithm on module exit. */
void rc80211_pid_exit(void)
{
	ieee80211_rate_control_unregister(&mac80211_rcpid);
}
| gpl-2.0 |
MasterAwesome/android_kernel_oneplus_msm8974 | fs/ntfs/sysctl.c | 12096 | 2309 | /*
* sysctl.c - Code for sysctl handling in NTFS Linux kernel driver. Part of
* the Linux-NTFS project. Adapted from the old NTFS driver,
* Copyright (C) 1997 Martin von Löwis, Régis Duchesne
*
* Copyright (c) 2002-2005 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifdef DEBUG
#include <linux/module.h>
#ifdef CONFIG_SYSCTL
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include "sysctl.h"
#include "debug.h"
/* Definition of the ntfs sysctl. */
/* Definition of the ntfs sysctl: /proc/sys/fs/ntfs-debug toggles debug_msgs. */
static ctl_table ntfs_sysctls[] = {
	{
		.procname	= "ntfs-debug",
		.data		= &debug_msgs,		/* Data pointer and size. */
		.maxlen		= sizeof(debug_msgs),
		.mode		= 0644,			/* Mode, proc handler. */
		.proc_handler	= proc_dointvec
	},
	{}	/* sentinel terminating the table */
};
/* Define the parent directory /proc/sys/fs. */
/* Define the parent directory /proc/sys/fs under which ntfs-debug lives. */
static ctl_table sysctls_root[] = {
	{
		.procname	= "fs",
		.mode		= 0555,	/* r-x: directory entry, not a value */
		.child		= ntfs_sysctls
	},
	{}	/* sentinel terminating the table */
};
/* Storage for the sysctls header. */
static struct ctl_table_header *sysctls_root_table = NULL;
/**
 * ntfs_sysctl - add or remove the debug sysctl
 * @add:	add (1) or remove (0) the sysctl
 *
 * Register or unregister the /proc/sys/fs/ntfs-debug sysctl table.
 * Return 0 on success or -errno on error.
 */
int ntfs_sysctl(int add)
{
	if (!add) {
		/* Removal without a prior registration is a driver bug. */
		BUG_ON(!sysctls_root_table);
		unregister_sysctl_table(sysctls_root_table);
		sysctls_root_table = NULL;
		return 0;
	}

	/* Double registration is a driver bug. */
	BUG_ON(sysctls_root_table);
	sysctls_root_table = register_sysctl_table(sysctls_root);
	return sysctls_root_table ? 0 : -ENOMEM;
}
#endif /* CONFIG_SYSCTL */
#endif /* DEBUG */
| gpl-2.0 |
philippedeswert/linux-pandaboard | fs/ntfs/sysctl.c | 12096 | 2309 | /*
* sysctl.c - Code for sysctl handling in NTFS Linux kernel driver. Part of
* the Linux-NTFS project. Adapted from the old NTFS driver,
* Copyright (C) 1997 Martin von Löwis, Régis Duchesne
*
* Copyright (c) 2002-2005 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifdef DEBUG
#include <linux/module.h>
#ifdef CONFIG_SYSCTL
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include "sysctl.h"
#include "debug.h"
/* Definition of the ntfs sysctl. */
/* Definition of the ntfs sysctl: exposes debug_msgs as fs/ntfs-debug. */
static ctl_table ntfs_sysctls[] = {
	{
		.procname	= "ntfs-debug",
		.data		= &debug_msgs,		/* Data pointer and size. */
		.maxlen		= sizeof(debug_msgs),
		.mode		= 0644,			/* Mode, proc handler. */
		.proc_handler	= proc_dointvec
	},
	{}	/* sentinel */
};
/* Define the parent directory /proc/sys/fs. */
/* Define the parent directory /proc/sys/fs. */
static ctl_table sysctls_root[] = {
	{
		.procname	= "fs",
		.mode		= 0555,	/* directory: read/search only */
		.child		= ntfs_sysctls
	},
	{}	/* sentinel */
};
/* Storage for the sysctls header. */
static struct ctl_table_header *sysctls_root_table = NULL;
/**
* ntfs_sysctl - add or remove the debug sysctl
* @add: add (1) or remove (0) the sysctl
*
* Add or remove the debug sysctl. Return 0 on success or -errno on error.
*/
int ntfs_sysctl(int add)
{
	if (add) {
		/* Must not already be registered. */
		BUG_ON(sysctls_root_table);
		sysctls_root_table = register_sysctl_table(sysctls_root);
		if (!sysctls_root_table)
			return -ENOMEM;
	} else {
		/* Must have been registered before removal. */
		BUG_ON(!sysctls_root_table);
		unregister_sysctl_table(sysctls_root_table);
		sysctls_root_table = NULL;
	}
	return 0;
}
#endif /* CONFIG_SYSCTL */
#endif /* DEBUG */
| gpl-2.0 |
CyanogenMod/android_kernel_goldfish | sound/pci/pcxhr/pcxhr_mixer.c | 12608 | 36943 | #define __NO_VERSION__
/*
* Driver for Digigram pcxhr compatible soundcards
*
* mixer callbacks
*
* Copyright (c) 2004 by Digigram <alsa@digigram.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <sound/core.h>
#include "pcxhr.h"
#include "pcxhr_hwdep.h"
#include "pcxhr_core.h"
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/asoundef.h>
#include "pcxhr_mixer.h"
#include "pcxhr_mix22.h"
#define PCXHR_LINE_CAPTURE_LEVEL_MIN 0 /* -112.0 dB */
#define PCXHR_LINE_CAPTURE_LEVEL_MAX 255 /* +15.5 dB */
#define PCXHR_LINE_CAPTURE_ZERO_LEVEL 224 /* 0.0 dB ( 0 dBu -> 0 dBFS ) */
#define PCXHR_LINE_PLAYBACK_LEVEL_MIN 0 /* -104.0 dB */
#define PCXHR_LINE_PLAYBACK_LEVEL_MAX 128 /* +24.0 dB */
#define PCXHR_LINE_PLAYBACK_ZERO_LEVEL 104 /* 0.0 dB ( 0 dBFS -> 0 dBu ) */
static const DECLARE_TLV_DB_SCALE(db_scale_analog_capture, -11200, 50, 1550);
static const DECLARE_TLV_DB_SCALE(db_scale_analog_playback, -10400, 100, 2400);
static const DECLARE_TLV_DB_SCALE(db_scale_a_hr222_capture, -11150, 50, 1600);
static const DECLARE_TLV_DB_SCALE(db_scale_a_hr222_playback, -2550, 50, 2400);
/*
 * Push the cached analog level of one channel (capture or playback) to the
 * board via an IO-register write message.  Muted playback channels are
 * programmed to the minimum level.  Returns 0 or -EINVAL on message failure.
 */
static int pcxhr_update_analog_audio_level(struct snd_pcxhr *chip,
					   int is_capture, int channel)
{
	int err, vol;
	struct pcxhr_rmh rmh;

	pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_WRITE);
	if (is_capture) {
		rmh.cmd[0] |= IO_NUM_REG_IN_ANA_LEVEL;
		rmh.cmd[2] = chip->analog_capture_volume[channel];
	} else {
		rmh.cmd[0] |= IO_NUM_REG_OUT_ANA_LEVEL;
		if (chip->analog_playback_active[channel])
			vol = chip->analog_playback_volume[channel];
		else
			vol = PCXHR_LINE_PLAYBACK_LEVEL_MIN;
		/* playback analog levels are inversed */
		rmh.cmd[2] = PCXHR_LINE_PLAYBACK_LEVEL_MAX - vol;
	}
	rmh.cmd[1] = 1 << ((2 * chip->chip_idx) + channel);	/* audio mask */
	rmh.cmd_len = 3;
	err = pcxhr_send_msg(chip->mgr, &rmh);
	if (err < 0) {
		snd_printk(KERN_DEBUG "error update_analog_audio_level card(%d)"
			   " is_capture(%d) err(%x)\n",
			   chip->chip_idx, is_capture, err);
		return -EINVAL;
	}
	return 0;
}
/*
* analog level control
*/
/*
 * ALSA info callback for the analog level controls: report a stereo integer
 * range whose limits depend on direction (private_value 0 = playback) and
 * on whether the board is an HR stereo (hr222) variant.
 */
static int pcxhr_analog_vol_info(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_info *uinfo)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	if (kcontrol->private_value == 0) {	/* playback */
		if (chip->mgr->is_hr_stereo) {
			uinfo->value.integer.min =
				HR222_LINE_PLAYBACK_LEVEL_MIN;	/* -25 dB */
			uinfo->value.integer.max =
				HR222_LINE_PLAYBACK_LEVEL_MAX;	/* +24 dB */
		} else {
			uinfo->value.integer.min =
				PCXHR_LINE_PLAYBACK_LEVEL_MIN;	/*-104 dB */
			uinfo->value.integer.max =
				PCXHR_LINE_PLAYBACK_LEVEL_MAX;	/* +24 dB */
		}
	} else {				/* capture */
		if (chip->mgr->is_hr_stereo) {
			uinfo->value.integer.min =
				HR222_LINE_CAPTURE_LEVEL_MIN;	/*-112 dB */
			uinfo->value.integer.max =
				HR222_LINE_CAPTURE_LEVEL_MAX;	/* +15.5 dB */
		} else {
			uinfo->value.integer.min =
				PCXHR_LINE_CAPTURE_LEVEL_MIN;	/*-112 dB */
			uinfo->value.integer.max =
				PCXHR_LINE_CAPTURE_LEVEL_MAX;	/* +15.5 dB */
		}
	}
	return 0;
}
/*
 * ALSA get callback for the analog level controls: report the cached stereo
 * level for playback (private_value == 0) or capture, under the mixer mutex.
 */
static int pcxhr_analog_vol_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int *level;

	mutex_lock(&chip->mgr->mixer_mutex);
	level = (kcontrol->private_value == 0) ?
		chip->analog_playback_volume :	/* playback */
		chip->analog_capture_volume;	/* capture */
	ucontrol->value.integer.value[0] = level[0];
	ucontrol->value.integer.value[1] = level[1];
	mutex_unlock(&chip->mgr->mixer_mutex);
	return 0;
}
/*
 * ALSA put callback for the analog level controls: validate each channel's
 * new level against the board-specific range, store it, and push changed
 * levels to the hardware.  Out-of-range channels are silently skipped.
 * Returns 1 if anything changed, 0 otherwise.
 */
static int pcxhr_analog_vol_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int changed = 0;
	int is_capture, i;

	mutex_lock(&chip->mgr->mixer_mutex);
	is_capture = (kcontrol->private_value != 0);
	for (i = 0; i < 2; i++) {
		int new_volume = ucontrol->value.integer.value[i];
		int *stored_volume = is_capture ?
			&chip->analog_capture_volume[i] :
			&chip->analog_playback_volume[i];
		/* Range check against the variant-specific limits. */
		if (is_capture) {
			if (chip->mgr->is_hr_stereo) {
				if (new_volume < HR222_LINE_CAPTURE_LEVEL_MIN ||
				    new_volume > HR222_LINE_CAPTURE_LEVEL_MAX)
					continue;
			} else {
				if (new_volume < PCXHR_LINE_CAPTURE_LEVEL_MIN ||
				    new_volume > PCXHR_LINE_CAPTURE_LEVEL_MAX)
					continue;
			}
		} else {
			if (chip->mgr->is_hr_stereo) {
				if (new_volume < HR222_LINE_PLAYBACK_LEVEL_MIN ||
				    new_volume > HR222_LINE_PLAYBACK_LEVEL_MAX)
					continue;
			} else {
				if (new_volume < PCXHR_LINE_PLAYBACK_LEVEL_MIN ||
				    new_volume > PCXHR_LINE_PLAYBACK_LEVEL_MAX)
					continue;
			}
		}
		if (*stored_volume != new_volume) {
			*stored_volume = new_volume;
			changed = 1;
			/* Program the new level into the hardware. */
			if (chip->mgr->is_hr_stereo)
				hr222_update_analog_audio_level(chip,
								is_capture, i);
			else
				pcxhr_update_analog_audio_level(chip,
								is_capture, i);
		}
	}
	mutex_unlock(&chip->mgr->mixer_mutex);
	return changed;
}
/* Template for the analog playback/capture volume controls. */
static struct snd_kcontrol_new pcxhr_control_analog_level = {
	.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
	.access	= (SNDRV_CTL_ELEM_ACCESS_READWRITE |
		   SNDRV_CTL_ELEM_ACCESS_TLV_READ),
	/* name will be filled later */
	.info	= pcxhr_analog_vol_info,
	.get	= pcxhr_analog_vol_get,
	.put	= pcxhr_analog_vol_put,
	/* tlv will be filled later */
};
/* shared */
#define pcxhr_sw_info snd_ctl_boolean_stereo_info
/* ALSA get callback: report the master playback switch state per channel. */
static int pcxhr_audio_sw_get(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int ch;

	mutex_lock(&chip->mgr->mixer_mutex);
	for (ch = 0; ch < 2; ch++)
		ucontrol->value.integer.value[ch] =
			chip->analog_playback_active[ch];
	mutex_unlock(&chip->mgr->mixer_mutex);
	return 0;
}
/*
 * ALSA put callback for the master playback switch: store the new per-channel
 * mute state and reprogram the analog playback level for every channel that
 * changed.  Returns 1 if anything changed.
 */
static int pcxhr_audio_sw_put(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int i, changed = 0;

	mutex_lock(&chip->mgr->mixer_mutex);
	for(i = 0; i < 2; i++) {
		if (chip->analog_playback_active[i] !=
		    ucontrol->value.integer.value[i]) {
			chip->analog_playback_active[i] =
				!!ucontrol->value.integer.value[i];
			changed = 1;
			/* update playback levels */
			if (chip->mgr->is_hr_stereo)
				hr222_update_analog_audio_level(chip, 0, i);
			else
				pcxhr_update_analog_audio_level(chip, 0, i);
		}
	}
	mutex_unlock(&chip->mgr->mixer_mutex);
	return changed;
}
/* Control definition for the analog output (master playback) switch. */
static struct snd_kcontrol_new pcxhr_control_output_switch = {
	.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
	.name	= "Master Playback Switch",
	.info	= pcxhr_sw_info,	/* shared */
	.get	= pcxhr_audio_sw_get,
	.put	= pcxhr_audio_sw_put
};
#define PCXHR_DIGITAL_LEVEL_MIN 0x000 /* -110 dB */
#define PCXHR_DIGITAL_LEVEL_MAX 0x1ff /* +18 dB */
#define PCXHR_DIGITAL_ZERO_LEVEL 0x1b7 /* 0 dB */
static const DECLARE_TLV_DB_SCALE(db_scale_digital, -10975, 25, 1800);
#define MORE_THAN_ONE_STREAM_LEVEL 0x000001
#define VALID_STREAM_PAN_LEVEL_MASK 0x800000
#define VALID_STREAM_LEVEL_MASK 0x400000
#define VALID_STREAM_LEVEL_1_MASK 0x200000
#define VALID_STREAM_LEVEL_2_MASK 0x100000
/*
 * Program the digital playback level of stream @idx on the board: send a
 * STREAM_OUT_LEVEL_ADJUST message carrying the left/right levels (minimum
 * level substituted when a channel is muted).  Returns 0 or -EINVAL.
 */
static int pcxhr_update_playback_stream_level(struct snd_pcxhr* chip, int idx)
{
	int err;
	struct pcxhr_rmh rmh;
	struct pcxhr_pipe *pipe = &chip->playback_pipe;
	int left, right;

	if (chip->digital_playback_active[idx][0])
		left = chip->digital_playback_volume[idx][0];
	else
		left = PCXHR_DIGITAL_LEVEL_MIN;
	if (chip->digital_playback_active[idx][1])
		right = chip->digital_playback_volume[idx][1];
	else
		right = PCXHR_DIGITAL_LEVEL_MIN;

	pcxhr_init_rmh(&rmh, CMD_STREAM_OUT_LEVEL_ADJUST);
	/* add pipe and stream mask */
	pcxhr_set_pipe_cmd_params(&rmh, 0, pipe->first_audio, 0, 1<<idx);
	/* volume left->left / right->right panoramic level */
	rmh.cmd[0] |= MORE_THAN_ONE_STREAM_LEVEL;
	rmh.cmd[2]  = VALID_STREAM_PAN_LEVEL_MASK | VALID_STREAM_LEVEL_1_MASK;
	rmh.cmd[2] |= (left << 10);
	rmh.cmd[3]  = VALID_STREAM_PAN_LEVEL_MASK | VALID_STREAM_LEVEL_2_MASK;
	rmh.cmd[3] |= right;
	rmh.cmd_len = 4;

	err = pcxhr_send_msg(chip->mgr, &rmh);
	if (err < 0) {
		snd_printk(KERN_DEBUG "error update_playback_stream_level "
			   "card(%d) err(%x)\n", chip->chip_idx, err);
		return -EINVAL;
	}
	return 0;
}
#define AUDIO_IO_HAS_MUTE_LEVEL 0x400000
#define AUDIO_IO_HAS_MUTE_MONITOR_1 0x200000
#define VALID_AUDIO_IO_DIGITAL_LEVEL 0x000001
#define VALID_AUDIO_IO_MONITOR_LEVEL 0x000002
#define VALID_AUDIO_IO_MUTE_LEVEL 0x000004
#define VALID_AUDIO_IO_MUTE_MONITOR_1 0x000008
/*
 * Program a pipe-level audio parameter for one channel: the digital capture
 * level (capture path) or the monitoring level and monitoring mute
 * (playback path).  Returns 0 or -EINVAL on message failure.
 */
static int pcxhr_update_audio_pipe_level(struct snd_pcxhr *chip,
					 int capture, int channel)
{
	int err;
	struct pcxhr_rmh rmh;
	struct pcxhr_pipe *pipe;

	if (capture)
		pipe = &chip->capture_pipe[0];
	else
		pipe = &chip->playback_pipe;

	pcxhr_init_rmh(&rmh, CMD_AUDIO_LEVEL_ADJUST);
	/* add channel mask */
	pcxhr_set_pipe_cmd_params(&rmh, capture, 0, 0,
				  1 << (channel + pipe->first_audio));
	/* TODO : if mask (3 << pipe->first_audio) is used, left and right
	 * channel will be programmed to the same params */
	if (capture) {
		rmh.cmd[0] |= VALID_AUDIO_IO_DIGITAL_LEVEL;
		/* VALID_AUDIO_IO_MUTE_LEVEL not yet handled
		 * (capture pipe level) */
		rmh.cmd[2] = chip->digital_capture_volume[channel];
	} else {
		rmh.cmd[0] |= VALID_AUDIO_IO_MONITOR_LEVEL |
			      VALID_AUDIO_IO_MUTE_MONITOR_1;
		/* VALID_AUDIO_IO_DIGITAL_LEVEL and VALID_AUDIO_IO_MUTE_LEVEL
		 * not yet handled (playback pipe level)
		 */
		rmh.cmd[2] = chip->monitoring_volume[channel] << 10;
		if (chip->monitoring_active[channel] == 0)
			rmh.cmd[2] |= AUDIO_IO_HAS_MUTE_MONITOR_1;
	}
	rmh.cmd_len = 3;

	err = pcxhr_send_msg(chip->mgr, &rmh);
	if (err < 0) {
		snd_printk(KERN_DEBUG "error update_audio_level(%d) err=%x\n",
			   chip->chip_idx, err);
		return -EINVAL;
	}
	return 0;
}
/* shared */
/*
 * Shared ALSA info callback for all digital level controls: a stereo
 * integer range of PCXHR_DIGITAL_LEVEL_MIN..PCXHR_DIGITAL_LEVEL_MAX.
 */
static int pcxhr_digital_vol_info(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	uinfo->type  = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = PCXHR_DIGITAL_LEVEL_MIN;	/* -109.5 dB */
	uinfo->value.integer.max = PCXHR_DIGITAL_LEVEL_MAX;	/*   18.0 dB */
	return 0;
}
/*
 * ALSA get callback for PCM digital volume: report the cached stereo level
 * of the capture path (private_value != 0) or of playback stream @idx.
 */
static int pcxhr_pcm_vol_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); /* index */
	int *stored_volume;
	int is_capture = kcontrol->private_value;

	mutex_lock(&chip->mgr->mixer_mutex);
	if (is_capture)		/* digital capture */
		stored_volume = chip->digital_capture_volume;
	else			/* digital playback */
		stored_volume = chip->digital_playback_volume[idx];
	ucontrol->value.integer.value[0] = stored_volume[0];
	ucontrol->value.integer.value[1] = stored_volume[1];
	mutex_unlock(&chip->mgr->mixer_mutex);
	return 0;
}
/*
 * ALSA put callback for PCM digital volume: validate and store each channel's
 * new level; capture levels are pushed per channel, playback levels are
 * pushed once per stream after the loop.  Returns 1 if anything changed.
 */
static int pcxhr_pcm_vol_put(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); /* index */
	int changed = 0;
	int is_capture = kcontrol->private_value;
	int *stored_volume;
	int i;

	mutex_lock(&chip->mgr->mixer_mutex);
	if (is_capture)		/* digital capture */
		stored_volume = chip->digital_capture_volume;
	else			/* digital playback */
		stored_volume = chip->digital_playback_volume[idx];
	for (i = 0; i < 2; i++) {
		int vol = ucontrol->value.integer.value[i];
		/* Silently skip out-of-range values. */
		if (vol < PCXHR_DIGITAL_LEVEL_MIN ||
		    vol > PCXHR_DIGITAL_LEVEL_MAX)
			continue;
		if (stored_volume[i] != vol) {
			stored_volume[i] = vol;
			changed = 1;
			if (is_capture)	/* update capture volume */
				pcxhr_update_audio_pipe_level(chip, 1, i);
		}
	}
	if (!is_capture && changed)	/* update playback volume */
		pcxhr_update_playback_stream_level(chip, idx);
	mutex_unlock(&chip->mgr->mixer_mutex);
	return changed;
}
/* Template for the PCM playback/capture digital volume controls. */
static struct snd_kcontrol_new snd_pcxhr_pcm_vol =
{
	.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
	.access	= (SNDRV_CTL_ELEM_ACCESS_READWRITE |
		   SNDRV_CTL_ELEM_ACCESS_TLV_READ),
	/* name will be filled later */
	/* count will be filled later */
	.info	= pcxhr_digital_vol_info,	/* shared */
	.get	= pcxhr_pcm_vol_get,
	.put	= pcxhr_pcm_vol_put,
	.tlv	= { .p = db_scale_digital },
};
/* ALSA get callback: report the per-channel switch state of stream @idx. */
static int pcxhr_pcm_sw_get(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); /* index */
	int ch;

	mutex_lock(&chip->mgr->mixer_mutex);
	for (ch = 0; ch < 2; ch++)
		ucontrol->value.integer.value[ch] =
			chip->digital_playback_active[idx][ch];
	mutex_unlock(&chip->mgr->mixer_mutex);
	return 0;
}
/*
 * ALSA put callback for the PCM playback switch: store the new per-channel
 * state of stream @idx and reprogram the stream level once if anything
 * changed.  Returns 1 on change, 0 otherwise.
 *
 * Cleanup vs. original: the local 'j' was a pure alias of 'idx' and has
 * been removed.
 */
static int pcxhr_pcm_sw_put(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int changed = 0;
	int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); /* index */
	int i;

	mutex_lock(&chip->mgr->mixer_mutex);
	for (i = 0; i < 2; i++) {
		if (chip->digital_playback_active[idx][i] !=
		    ucontrol->value.integer.value[i]) {
			chip->digital_playback_active[idx][i] =
				!!ucontrol->value.integer.value[i];
			changed = 1;
		}
	}
	if (changed)
		pcxhr_update_playback_stream_level(chip, idx);
	mutex_unlock(&chip->mgr->mixer_mutex);
	return changed;
}
/* Control definition for the per-stream PCM playback switches. */
static struct snd_kcontrol_new pcxhr_control_pcm_switch = {
	.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
	.name	= "PCM Playback Switch",
	.count	= PCXHR_PLAYBACK_STREAMS,
	.info	= pcxhr_sw_info,	/* shared */
	.get	= pcxhr_pcm_sw_get,
	.put	= pcxhr_pcm_sw_put
};
/*
* monitoring level control
*/
/* ALSA get callback: report the cached stereo monitoring level. */
static int pcxhr_monitor_vol_get(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int ch;

	mutex_lock(&chip->mgr->mixer_mutex);
	for (ch = 0; ch < 2; ch++)
		ucontrol->value.integer.value[ch] =
			chip->monitoring_volume[ch];
	mutex_unlock(&chip->mgr->mixer_mutex);
	return 0;
}
/*
 * ALSA put callback for the monitoring volume: store each channel's new
 * level and push it to hardware, but only while the channel's monitoring
 * is unmuted (the muted level is programmed by the switch callback).
 * Returns 1 if anything changed.
 */
static int pcxhr_monitor_vol_put(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int changed = 0;
	int i;

	mutex_lock(&chip->mgr->mixer_mutex);
	for (i = 0; i < 2; i++) {
		if (chip->monitoring_volume[i] !=
		    ucontrol->value.integer.value[i]) {
			chip->monitoring_volume[i] =
				ucontrol->value.integer.value[i];
			if (chip->monitoring_active[i])
				/* update monitoring volume and mute */
				/* do only when monitoring is unmuted */
				pcxhr_update_audio_pipe_level(chip, 0, i);
			changed = 1;
		}
	}
	mutex_unlock(&chip->mgr->mixer_mutex);
	return changed;
}
/* Control definition for the monitoring playback volume. */
static struct snd_kcontrol_new pcxhr_control_monitor_vol = {
	.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
	.access	= (SNDRV_CTL_ELEM_ACCESS_READWRITE |
		   SNDRV_CTL_ELEM_ACCESS_TLV_READ),
	.name	= "Monitoring Playback Volume",
	.info	= pcxhr_digital_vol_info,	/* shared */
	.get	= pcxhr_monitor_vol_get,
	.put	= pcxhr_monitor_vol_put,
	.tlv	= { .p = db_scale_digital },
};
/*
* monitoring switch control
*/
/* ALSA get callback: report the per-channel monitoring switch state. */
static int pcxhr_monitor_sw_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int ch;

	mutex_lock(&chip->mgr->mixer_mutex);
	for (ch = 0; ch < 2; ch++)
		ucontrol->value.integer.value[ch] =
			chip->monitoring_active[ch];
	mutex_unlock(&chip->mgr->mixer_mutex);
	return 0;
}
/*
 * ALSA put callback for the monitoring switch: record which channels
 * changed as a bitmask (bit 0 = left, bit 1 = right), then push the new
 * volume/mute state per changed channel.  Returns 1 if anything changed.
 */
static int pcxhr_monitor_sw_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int changed = 0;
	int i;

	mutex_lock(&chip->mgr->mixer_mutex);
	for (i = 0; i < 2; i++) {
		if (chip->monitoring_active[i] !=
		    ucontrol->value.integer.value[i]) {
			chip->monitoring_active[i] =
				!!ucontrol->value.integer.value[i];
			changed |= (1<<i);	/* mask 0x01 and 0x02 */
		}
	}
	if (changed & 0x01)
		/* update left monitoring volume and mute */
		pcxhr_update_audio_pipe_level(chip, 0, 0);
	if (changed & 0x02)
		/* update right monitoring volume and mute */
		pcxhr_update_audio_pipe_level(chip, 0, 1);
	mutex_unlock(&chip->mgr->mixer_mutex);
	return (changed != 0);
}
/* Control definition for the monitoring playback switch. */
static struct snd_kcontrol_new pcxhr_control_monitor_sw = {
	.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
	.name	= "Monitoring Playback Switch",
	.info	= pcxhr_sw_info,	/* shared */
	.get	= pcxhr_monitor_sw_get,
	.put	= pcxhr_monitor_sw_put
};
/*
* audio source select
*/
#define PCXHR_SOURCE_AUDIO01_UER 0x000100
#define PCXHR_SOURCE_AUDIO01_SYNC 0x000200
#define PCXHR_SOURCE_AUDIO23_UER 0x000400
#define PCXHR_SOURCE_AUDIO45_UER 0x001000
#define PCXHR_SOURCE_AUDIO67_UER 0x040000
/*
 * Apply the cached audio_capture_source selection of this chip to the
 * hardware: select the analog or digital input plug, resync the inputs
 * when the selection changed, and program the CS8420 sample-rate-converter
 * on/off state (with a combined register write on AES-192k-capable boards).
 * Returns 0 or a negative error from message sending.
 */
static int pcxhr_set_audio_source(struct snd_pcxhr* chip)
{
	struct pcxhr_rmh rmh;
	unsigned int mask, reg;
	unsigned int codec;
	int err, changed;

	/* Per-chip input-select bit and codec chip-select. */
	switch (chip->chip_idx) {
	case 0 : mask = PCXHR_SOURCE_AUDIO01_UER; codec = CS8420_01_CS; break;
	case 1 : mask = PCXHR_SOURCE_AUDIO23_UER; codec = CS8420_23_CS; break;
	case 2 : mask = PCXHR_SOURCE_AUDIO45_UER; codec = CS8420_45_CS; break;
	case 3 : mask = PCXHR_SOURCE_AUDIO67_UER; codec = CS8420_67_CS; break;
	default: return -EINVAL;
	}
	if (chip->audio_capture_source != 0) {
		reg = mask;	/* audio source from digital plug */
	} else {
		reg = 0;	/* audio source from analog plug */
	}

	/* set the input source */
	pcxhr_write_io_num_reg_cont(chip->mgr, mask, reg, &changed);
	/* resync them (otherwise channel inversion possible) */
	if (changed) {
		pcxhr_init_rmh(&rmh, CMD_RESYNC_AUDIO_INPUTS);
		rmh.cmd[0] |= (1 << chip->chip_idx);
		err = pcxhr_send_msg(chip->mgr, &rmh);
		if (err)
			return err;
	}

	if (chip->mgr->board_aes_in_192k) {
		int i;
		unsigned int src_config = 0xC0;
		/* update all src configs with one call */
		for (i = 0; (i < 4) && (i < chip->mgr->capture_chips); i++) {
			if (chip->mgr->chip[i]->audio_capture_source == 2)
				src_config |= (1 << (3 - i));
		}
		/* set codec SRC on off */
		pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_WRITE);
		rmh.cmd_len = 2;
		rmh.cmd[0] |= IO_NUM_REG_CONFIG_SRC;
		rmh.cmd[1] = src_config;
		err = pcxhr_send_msg(chip->mgr, &rmh);
	} else {
		int use_src = 0;
		if (chip->audio_capture_source == 2)
			use_src = 1;
		/* set codec SRC on off */
		pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_WRITE);
		rmh.cmd_len = 3;
		rmh.cmd[0] |= IO_NUM_UER_CHIP_REG;
		rmh.cmd[1] = codec;
		rmh.cmd[2] = ((CS8420_DATA_FLOW_CTL & CHIP_SIG_AND_MAP_SPI) |
			      (use_src ? 0x41 : 0x54));
		err = pcxhr_send_msg(chip->mgr, &rmh);
		if (err)
			return err;
		rmh.cmd[2] = ((CS8420_CLOCK_SRC_CTL & CHIP_SIG_AND_MAP_SPI) |
			      (use_src ? 0x41 : 0x49));
		err = pcxhr_send_msg(chip->mgr, &rmh);
	}
	return err;
}
/*
 * ALSA info callback for the capture source enum: the number of selectable
 * items depends on board features (AES input adds "Digi+SRC", a mic input
 * additionally adds "Mic" and "Line+Mic").
 */
static int pcxhr_audio_src_info(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_info *uinfo)
{
	static const char *texts[5] = {
		"Line", "Digital", "Digi+SRC", "Mic", "Line+Mic"
	};
	int i;
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);

	i = 2;			/* no SRC, no Mic available */
	if (chip->mgr->board_has_aes1) {
		i = 3;		/* SRC available */
		if (chip->mgr->board_has_mic)
			i = 5;	/* Mic and MicroMix available */
	}
	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = i;
	/* Clamp the queried item before naming it. */
	if (uinfo->value.enumerated.item > (i-1))
		uinfo->value.enumerated.item = i-1;
	strcpy(uinfo->value.enumerated.name,
	       texts[uinfo->value.enumerated.item]);
	return 0;
}
/* ALSA get callback: report the cached capture source selection. */
static int pcxhr_audio_src_get(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *pcxhr = snd_kcontrol_chip(kcontrol);

	/* Simple cached read; no board access required here. */
	ucontrol->value.enumerated.item[0] = pcxhr->audio_capture_source;
	return 0;
}
/*
 * ALSA put callback for the capture source enum: validate the requested
 * item against the board's feature-dependent item count, store it, and
 * apply it to the hardware.  Returns 1 on change, 0 if unchanged, -EINVAL
 * for an out-of-range item.
 */
static int pcxhr_audio_src_put(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int ret = 0;
	int i = 2;		/* no SRC, no Mic available */

	if (chip->mgr->board_has_aes1) {
		i = 3;		/* SRC available */
		if (chip->mgr->board_has_mic)
			i = 5;	/* Mic and MicroMix available */
	}
	if (ucontrol->value.enumerated.item[0] >= i)
		return -EINVAL;
	mutex_lock(&chip->mgr->mixer_mutex);
	if (chip->audio_capture_source != ucontrol->value.enumerated.item[0]) {
		chip->audio_capture_source = ucontrol->value.enumerated.item[0];
		/* Apply through the board-variant-specific path. */
		if (chip->mgr->is_hr_stereo)
			hr222_set_audio_source(chip);
		else
			pcxhr_set_audio_source(chip);
		ret = 1;
	}
	mutex_unlock(&chip->mgr->mixer_mutex);
	return ret;
}
/* Control definition for the capture source selector. */
static struct snd_kcontrol_new pcxhr_control_audio_src = {
	.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
	.name	= "Capture Source",
	.info	= pcxhr_audio_src_info,
	.get	= pcxhr_audio_src_get,
	.put	= pcxhr_audio_src_put,
};
/*
* clock type selection
* enum pcxhr_clock_type {
* PCXHR_CLOCK_TYPE_INTERNAL = 0,
* PCXHR_CLOCK_TYPE_WORD_CLOCK,
* PCXHR_CLOCK_TYPE_AES_SYNC,
* PCXHR_CLOCK_TYPE_AES_1,
* PCXHR_CLOCK_TYPE_AES_2,
* PCXHR_CLOCK_TYPE_AES_3,
* PCXHR_CLOCK_TYPE_AES_4,
* PCXHR_CLOCK_TYPE_MAX = PCXHR_CLOCK_TYPE_AES_4,
* HR22_CLOCK_TYPE_INTERNAL = PCXHR_CLOCK_TYPE_INTERNAL,
* HR22_CLOCK_TYPE_AES_SYNC,
* HR22_CLOCK_TYPE_AES_1,
* HR22_CLOCK_TYPE_MAX = HR22_CLOCK_TYPE_AES_1,
* };
*/
/* describe the "Clock Mode" enum: item count depends on board type
 * (HR22 stereo vs PCXHR) and on the number of capture chips
 */
static int pcxhr_clock_type_info(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_info *uinfo)
{
	static const char *textsPCXHR[7] = {
		"Internal", "WordClock", "AES Sync",
		"AES 1", "AES 2", "AES 3", "AES 4"
	};
	static const char *textsHR22[3] = {
		"Internal", "AES Sync", "AES 1"
	};
	struct pcxhr_mgr *mgr = snd_kcontrol_chip(kcontrol);
	const char **names;
	int nitems = 2;	/* at least Internal and AES Sync clock */

	if (mgr->board_has_aes1) {
		nitems += mgr->capture_chips;	/* add AES x */
		if (!mgr->is_hr_stereo)
			nitems += 1;		/* add word clock */
	}
	if (mgr->is_hr_stereo) {
		names = textsHR22;
		snd_BUG_ON(nitems > (HR22_CLOCK_TYPE_MAX+1));
	} else {
		names = textsPCXHR;
		snd_BUG_ON(nitems > (PCXHR_CLOCK_TYPE_MAX+1));
	}

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = nitems;
	if (uinfo->value.enumerated.item >= nitems)
		uinfo->value.enumerated.item = nitems - 1;
	strcpy(uinfo->value.enumerated.name,
	       names[uinfo->value.enumerated.item]);
	return 0;
}
/* report the currently configured clock source */
static int pcxhr_clock_type_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct pcxhr_mgr *mgr = snd_kcontrol_chip(kcontrol);

	ucontrol->value.enumerated.item[0] = mgr->use_clock_type;
	return 0;
}
/* Select the clock source (internal or one of the external sync inputs)
 * and, when a valid rate is available, immediately re-program the board
 * clock to that rate. Returns 1 when the selection changed, 0 otherwise,
 * -EINVAL for an out-of-range item.
 */
static int pcxhr_clock_type_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct pcxhr_mgr *mgr = snd_kcontrol_chip(kcontrol);
	int rate, ret = 0;
	unsigned int clock_items = 2; /* at least Internal and AES Sync clock */

	if (mgr->board_has_aes1) {
		clock_items += mgr->capture_chips; /* add AES x */
		if (!mgr->is_hr_stereo)
			clock_items += 1; /* add word clock */
	}
	/* reject values beyond what the _info callback advertises */
	if (ucontrol->value.enumerated.item[0] >= clock_items)
		return -EINVAL;
	mutex_lock(&mgr->mixer_mutex);
	if (mgr->use_clock_type != ucontrol->value.enumerated.item[0]) {
		/* setup_mutex serializes against stream (re)configuration */
		mutex_lock(&mgr->setup_mutex);
		mgr->use_clock_type = ucontrol->value.enumerated.item[0];
		rate = 0;
		if (mgr->use_clock_type != PCXHR_CLOCK_TYPE_INTERNAL) {
			/* probe the rate present on the external input */
			pcxhr_get_external_clock(mgr, mgr->use_clock_type,
						 &rate);
		} else {
			rate = mgr->sample_rate;
			if (!rate)
				rate = 48000; /* default when nothing ran yet */
		}
		if (rate) {
			pcxhr_set_clock(mgr, rate);
			/* only track the rate if a stream rate was set */
			if (mgr->sample_rate)
				mgr->sample_rate = rate;
		}
		mutex_unlock(&mgr->setup_mutex);
		ret = 1; /* return 1 even if the set was not done. ok ? */
	}
	mutex_unlock(&mgr->mixer_mutex);
	return ret;
}
/* "Clock Mode" mixer enum: selects internal or one of the external
 * clock sources (one control per pcxhr manager, not per subdevice)
 */
static struct snd_kcontrol_new pcxhr_control_clock_type = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Clock Mode",
	.info = pcxhr_clock_type_info,
	.get = pcxhr_clock_type_get,
	.put = pcxhr_clock_type_put,
};
/*
* clock rate control
* specific control that scans the sample rates on the external plugs
*/
/* describe the "Clock Rates" control: one integer slot per clock source */
static int pcxhr_clock_rate_info(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_info *uinfo)
{
	struct pcxhr_mgr *mgr = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 3 + mgr->capture_chips;
	uinfo->value.integer.min = 0;		/* clock not present */
	uinfo->value.integer.max = 192000;	/* max sample rate 192 kHz */
	return 0;
}
/* report the sample rate detected on each clock source (0 = not present) */
static int pcxhr_clock_rate_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct pcxhr_mgr *mgr = snd_kcontrol_chip(kcontrol);
	int nplugs = 3 + mgr->capture_chips;
	int idx, err, rate;

	mutex_lock(&mgr->mixer_mutex);
	for (idx = 0; idx < nplugs; idx++) {
		if (idx == PCXHR_CLOCK_TYPE_INTERNAL) {
			rate = mgr->sample_rate_real;
		} else {
			err = pcxhr_get_external_clock(mgr, idx, &rate);
			if (err)
				break;
		}
		ucontrol->value.integer.value[idx] = rate;
	}
	mutex_unlock(&mgr->mixer_mutex);
	return 0;
}
/* read-only card-level control reporting the rate seen on every clock
 * source (used to scan external clock presence/frequencies)
 */
static struct snd_kcontrol_new pcxhr_control_clock_rate = {
	.access = SNDRV_CTL_ELEM_ACCESS_READ,
	.iface = SNDRV_CTL_ELEM_IFACE_CARD,
	.name = "Clock Rates",
	.info = pcxhr_clock_rate_info,
	.get = pcxhr_clock_rate_get,
};
/*
* IEC958 status bits
*/
/* a single IEC958 element */
static int pcxhr_iec958_info(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_info *uinfo)
{
	uinfo->count = 1;
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	return 0;
}
/* Read one of the 5 IEC958 channel-status bytes from the capture UER
 * (AES/EBU) receiver chip attached to this subdevice.
 * @chip: pcxhr subdevice; chip_idx selects the receiver instance
 * @aes_idx: channel-status byte index (0..4)
 * @aes_bits: output, the byte read back from the chip
 * Returns 0 on success or a negative error code.
 */
static int pcxhr_iec958_capture_byte(struct snd_pcxhr *chip,
				     int aes_idx, unsigned char *aes_bits)
{
	int i, err;
	unsigned char temp;
	struct pcxhr_rmh rmh;

	pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_READ);
	rmh.cmd[0] |= IO_NUM_UER_CHIP_REG;
	/* select the receiver chip for this pcm pair */
	switch (chip->chip_idx) {
	/* instead of CS8420_01_CS use CS8416_01_CS for AES SYNC plug */
	case 0: rmh.cmd[1] = CS8420_01_CS; break;
	case 1: rmh.cmd[1] = CS8420_23_CS; break;
	case 2: rmh.cmd[1] = CS8420_45_CS; break;
	case 3: rmh.cmd[1] = CS8420_67_CS; break;
	default: return -EINVAL;
	}
	/* select the channel-status byte register (chip family dependent) */
	if (chip->mgr->board_aes_in_192k) {
		switch (aes_idx) {
		case 0: rmh.cmd[2] = CS8416_CSB0; break;
		case 1: rmh.cmd[2] = CS8416_CSB1; break;
		case 2: rmh.cmd[2] = CS8416_CSB2; break;
		case 3: rmh.cmd[2] = CS8416_CSB3; break;
		case 4: rmh.cmd[2] = CS8416_CSB4; break;
		default: return -EINVAL;
		}
	} else {
		switch (aes_idx) {
		/* instead of CS8420_CSB0 use CS8416_CSBx for AES SYNC plug */
		case 0: rmh.cmd[2] = CS8420_CSB0; break;
		case 1: rmh.cmd[2] = CS8420_CSB1; break;
		case 2: rmh.cmd[2] = CS8420_CSB2; break;
		case 3: rmh.cmd[2] = CS8420_CSB3; break;
		case 4: rmh.cmd[2] = CS8420_CSB4; break;
		default: return -EINVAL;
		}
	}
	/* size and code the chip id for the fpga */
	rmh.cmd[1] &= 0x0fffff;
	/* chip signature + map for spi read */
	rmh.cmd[2] &= CHIP_SIG_AND_MAP_SPI;
	rmh.cmd_len = 3;
	err = pcxhr_send_msg(chip->mgr, &rmh);
	if (err)
		return err;
	if (chip->mgr->board_aes_in_192k) {
		temp = (unsigned char)rmh.stat[1];
	} else {
		temp = 0;
		/* reversed bit order (not with CS8416_01_CS) */
		for (i = 0; i < 8; i++) {
			temp <<= 1;
			if (rmh.stat[1] & (1 << i))
				temp |= 1;
		}
	}
	snd_printdd("read iec958 AES %d byte %d = 0x%x\n",
		    chip->chip_idx, aes_idx, temp);
	*aes_bits = temp;
	return 0;
}
/* fill the 5 IEC958 status bytes: from the cached playback bits, or read
 * back from the capture receiver chip (private_value != 0 means capture)
 */
static int pcxhr_iec958_get(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int is_capture = kcontrol->private_value != 0;
	unsigned char byte;
	int idx, err;

	mutex_lock(&chip->mgr->mixer_mutex);
	for (idx = 0; idx < 5; idx++) {
		if (!is_capture) {	/* playback */
			byte = chip->aes_bits[idx];
		} else {		/* capture */
			if (chip->mgr->is_hr_stereo)
				err = hr222_iec958_capture_byte(chip, idx,
								&byte);
			else
				err = pcxhr_iec958_capture_byte(chip, idx,
								&byte);
			if (err)
				break;
		}
		ucontrol->value.iec958.status[idx] = byte;
	}
	mutex_unlock(&chip->mgr->mixer_mutex);
	return 0;
}
/* all 5 IEC958 status bytes are fully writable */
static int pcxhr_iec958_mask_get(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	int idx;

	for (idx = 4; idx >= 0; idx--)
		ucontrol->value.iec958.status[idx] = 0xff;
	return 0;
}
/* Push the changed bits of one IEC958 status byte to the DSP.
 * Each bit that differs from the cached value is sent as its own
 * CMD_ACCESS_IO_WRITE: the command word packs the chip index, the
 * absolute bit offset within the 5-byte block, and the new bit value.
 * The cached byte is updated once all writes succeeded.
 */
static int pcxhr_iec958_update_byte(struct snd_pcxhr *chip,
				    int aes_idx, unsigned char aes_bits)
{
	int i, err, cmd;
	unsigned char new_bits = aes_bits;
	unsigned char old_bits = chip->aes_bits[aes_idx];
	struct pcxhr_rmh rmh;

	for (i = 0; i < 8; i++) {
		if ((old_bits & 0x01) != (new_bits & 0x01)) {
			cmd = chip->chip_idx & 0x03; /* chip index 0..3 */
			if (chip->chip_idx > 3)
				/* new bit used if chip_idx>3 (PCX1222HR) */
				cmd |= 1 << 22;
			cmd |= ((aes_idx << 3) + i) << 2; /* add bit offset */
			cmd |= (new_bits & 0x01) << 23; /* add bit value */
			pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_WRITE);
			rmh.cmd[0] |= IO_NUM_REG_CUER;
			rmh.cmd[1] = cmd;
			rmh.cmd_len = 2;
			snd_printdd("write iec958 AES %d byte %d bit %d (cmd %x)\n",
				    chip->chip_idx, aes_idx, i, cmd);
			err = pcxhr_send_msg(chip->mgr, &rmh);
			if (err)
				return err;
		}
		/* advance to the next bit of both old and new bytes */
		old_bits >>= 1;
		new_bits >>= 1;
	}
	chip->aes_bits[aes_idx] = aes_bits;
	return 0;
}
/* write the playback IEC958 status bytes; only bytes that differ from
 * the cached state are pushed to the hardware
 */
static int pcxhr_iec958_put(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcxhr *chip = snd_kcontrol_chip(kcontrol);
	int idx, changed = 0;

	/* playback */
	mutex_lock(&chip->mgr->mixer_mutex);
	for (idx = 0; idx < 5; idx++) {
		unsigned char new_byte = ucontrol->value.iec958.status[idx];

		if (new_byte == chip->aes_bits[idx])
			continue;
		if (chip->mgr->is_hr_stereo)
			hr222_iec958_update_byte(chip, idx, new_byte);
		else
			pcxhr_iec958_update_byte(chip, idx, new_byte);
		changed = 1;
	}
	mutex_unlock(&chip->mgr->mixer_mutex);
	return changed;
}
/* read-only mask: which playback status bits userspace may change */
static struct snd_kcontrol_new pcxhr_control_playback_iec958_mask = {
	.access = SNDRV_CTL_ELEM_ACCESS_READ,
	.iface =  SNDRV_CTL_ELEM_IFACE_PCM,
	.name =   SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
	.info =   pcxhr_iec958_info,
	.get =    pcxhr_iec958_mask_get
};
/* read/write playback channel-status bytes (cached in chip->aes_bits) */
static struct snd_kcontrol_new pcxhr_control_playback_iec958 = {
	.iface =  SNDRV_CTL_ELEM_IFACE_PCM,
	.name =   SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
	.info =   pcxhr_iec958_info,
	.get =    pcxhr_iec958_get,
	.put =    pcxhr_iec958_put,
	.private_value = 0 /* playback */
};
/* read-only mask for the capture status bytes */
static struct snd_kcontrol_new pcxhr_control_capture_iec958_mask = {
	.access = SNDRV_CTL_ELEM_ACCESS_READ,
	.iface =  SNDRV_CTL_ELEM_IFACE_PCM,
	.name =   SNDRV_CTL_NAME_IEC958("",CAPTURE,MASK),
	.info =   pcxhr_iec958_info,
	.get =    pcxhr_iec958_mask_get
};
/* read-only capture channel-status bytes, read back from the receiver */
static struct snd_kcontrol_new pcxhr_control_capture_iec958 = {
	.access = SNDRV_CTL_ELEM_ACCESS_READ,
	.iface =  SNDRV_CTL_ELEM_IFACE_PCM,
	.name =   SNDRV_CTL_NAME_IEC958("",CAPTURE,DEFAULT),
	.info =   pcxhr_iec958_info,
	.get =    pcxhr_iec958_get,
	.private_value = 1 /* capture */
};
/* Initialize the cached mixer state of one subdevice to the levels the
 * embedded DSP uses after boot, and push the analog levels to the
 * hardware where the boot defaults differ from the cached values.
 * Fixes: removed a redundant trailing "return;" and corrected the
 * copy-pasted "playback" comment in the capture branch.
 */
static void pcxhr_init_audio_levels(struct snd_pcxhr *chip)
{
	int i;

	for (i = 0; i < 2; i++) {	/* left / right channel */
		if (chip->nb_streams_play) {
			int j;
			/* at boot time the digital volumes are unmuted 0dB */
			for (j = 0; j < PCXHR_PLAYBACK_STREAMS; j++) {
				chip->digital_playback_active[j][i] = 1;
				chip->digital_playback_volume[j][i] =
					PCXHR_DIGITAL_ZERO_LEVEL;
			}
			/* after boot, only two bits are set on the uer
			 * interface
			 */
			chip->aes_bits[0] = (IEC958_AES0_PROFESSIONAL |
					     IEC958_AES0_PRO_FS_48000);
#ifdef CONFIG_SND_DEBUG
			/* analog volumes for playback
			 * (is LEVEL_MIN after boot)
			 */
			chip->analog_playback_active[i] = 1;
			if (chip->mgr->is_hr_stereo)
				chip->analog_playback_volume[i] =
					HR222_LINE_PLAYBACK_ZERO_LEVEL;
			else {
				chip->analog_playback_volume[i] =
					PCXHR_LINE_PLAYBACK_ZERO_LEVEL;
				pcxhr_update_analog_audio_level(chip, 0, i);
			}
#endif
			/* stereo cards need to be initialised after boot */
			if (chip->mgr->is_hr_stereo)
				hr222_update_analog_audio_level(chip, 0, i);
		}
		if (chip->nb_streams_capt) {
			/* at boot time the digital volumes are unmuted 0dB */
			chip->digital_capture_volume[i] =
				PCXHR_DIGITAL_ZERO_LEVEL;
			chip->analog_capture_active = 1;
#ifdef CONFIG_SND_DEBUG
			/* analog volumes for capture
			 * (is LEVEL_MIN after boot)
			 */
			if (chip->mgr->is_hr_stereo)
				chip->analog_capture_volume[i] =
					HR222_LINE_CAPTURE_ZERO_LEVEL;
			else {
				chip->analog_capture_volume[i] =
					PCXHR_LINE_CAPTURE_ZERO_LEVEL;
				pcxhr_update_analog_audio_level(chip, 1, i);
			}
#endif
			/* stereo cards need to be initialised after boot */
			if (chip->mgr->is_hr_stereo)
				hr222_update_analog_audio_level(chip, 1, i);
		}
	}
}
/* Register all ALSA mixer controls for every subdevice of a pcxhr card
 * and seed the cached level/routing state (pcxhr_init_audio_levels).
 * Called once at probe time; returns 0 or the first snd_ctl_add() error.
 */
int pcxhr_create_mixer(struct pcxhr_mgr *mgr)
{
	struct snd_pcxhr *chip;
	int err, i;

	mutex_init(&mgr->mixer_mutex); /* can be in another place */

	for (i = 0; i < mgr->num_cards; i++) {
		struct snd_kcontrol_new temp;

		chip = mgr->chip[i];
		if (chip->nb_streams_play) {
			/* analog output level control */
			temp = pcxhr_control_analog_level;
			temp.name = "Master Playback Volume";
			temp.private_value = 0; /* playback */
			if (mgr->is_hr_stereo)
				temp.tlv.p = db_scale_a_hr222_playback;
			else
				temp.tlv.p = db_scale_analog_playback;
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&temp, chip));
			if (err < 0)
				return err;
			/* output mute controls */
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&pcxhr_control_output_switch,
						       chip));
			if (err < 0)
				return err;

			/* per-stream digital playback volume + switch */
			temp = snd_pcxhr_pcm_vol;
			temp.name = "PCM Playback Volume";
			temp.count = PCXHR_PLAYBACK_STREAMS;
			temp.private_value = 0; /* playback */
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&temp, chip));
			if (err < 0)
				return err;

			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&pcxhr_control_pcm_switch, chip));
			if (err < 0)
				return err;

			/* IEC958 controls */
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&pcxhr_control_playback_iec958_mask,
						       chip));
			if (err < 0)
				return err;
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&pcxhr_control_playback_iec958,
						       chip));
			if (err < 0)
				return err;
		}
		if (chip->nb_streams_capt) {
			/* analog input level control */
			temp = pcxhr_control_analog_level;
			temp.name = "Line Capture Volume";
			temp.private_value = 1; /* capture */
			if (mgr->is_hr_stereo)
				temp.tlv.p = db_scale_a_hr222_capture;
			else
				temp.tlv.p = db_scale_analog_capture;
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&temp, chip));
			if (err < 0)
				return err;

			/* digital capture volume (single capture stream) */
			temp = snd_pcxhr_pcm_vol;
			temp.name = "PCM Capture Volume";
			temp.count = 1;
			temp.private_value = 1; /* capture */
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&temp, chip));
			if (err < 0)
				return err;
			/* Audio source */
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&pcxhr_control_audio_src, chip));
			if (err < 0)
				return err;
			/* IEC958 controls */
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&pcxhr_control_capture_iec958_mask,
						       chip));
			if (err < 0)
				return err;
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&pcxhr_control_capture_iec958,
						       chip));
			if (err < 0)
				return err;
			/* extra mic controls exist only on stereo boards */
			if (mgr->is_hr_stereo) {
				err = hr222_add_mic_controls(chip);
				if (err < 0)
					return err;
			}
		}
		/* monitoring only if playback and capture device available */
		if (chip->nb_streams_capt > 0 && chip->nb_streams_play > 0) {
			/* monitoring */
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&pcxhr_control_monitor_vol, chip));
			if (err < 0)
				return err;
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&pcxhr_control_monitor_sw, chip));
			if (err < 0)
				return err;
		}

		if (i == 0) {
			/* clock mode only one control per pcxhr */
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&pcxhr_control_clock_type, mgr));
			if (err < 0)
				return err;
			/* non standard control used to scan
			 * the external clock presence/frequencies
			 */
			err = snd_ctl_add(chip->card,
					  snd_ctl_new1(&pcxhr_control_clock_rate, mgr));
			if (err < 0)
				return err;
		}

		/* init values for the mixer data */
		pcxhr_init_audio_levels(chip);
	}

	return 0;
}
| gpl-2.0 |
wanahmadzainie/linux-mainline | drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 65 | 20465 | /*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef CONFIG_RFS_ACCEL
#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "en.h"
/* 5-tuple identifying a steered flow; etype selects which member of each
 * address union is valid, ports are in network byte order
 */
struct arfs_tuple {
	__be16 etype;	/* ETH_P_IP or ETH_P_IPV6 */
	u8     ip_proto;	/* IPPROTO_TCP or IPPROTO_UDP */
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16 src_port;
	__be16 dst_port;
};
/* one accelerated-RFS flow: hashed into its table's rules_hash and
 * programmed into hardware from the aRFS workqueue
 */
struct arfs_rule {
	struct mlx5e_priv	*priv;
	struct work_struct      arfs_work;	/* runs arfs_handle_work() */
	struct mlx5_flow_handle *rule;	/* hw rule; NULL until installed */
	struct hlist_node	hlist;
	int			rxq;	/* target RX queue */
	/* Flow ID passed to ndo_rx_flow_steer */
	int			flow_id;
	/* Filter ID returned by ndo_rx_flow_steer */
	int			filter_id;
	struct arfs_tuple	tuple;
};
/* iterate over every aRFS rule in all tables and buckets; the _safe
 * hlist walk allows unlinking the current entry
 */
#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
	for (i = 0; i < ARFS_NUM_TYPES; i++) \
		mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)

/* iterate over every rule hashed into one table, removal-safe */
#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
	for (j = 0; j < ARFS_HASH_SIZE; j++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)
/* Map an aRFS table type to its traffic type. As in the original
 * interface, -EINVAL for an unknown value is funneled through the enum
 * return type; callers compare against -EINVAL directly.
 */
static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
{
	if (type == ARFS_IPV4_TCP)
		return MLX5E_TT_IPV4_TCP;
	if (type == ARFS_IPV4_UDP)
		return MLX5E_TT_IPV4_UDP;
	if (type == ARFS_IPV6_TCP)
		return MLX5E_TT_IPV6_TCP;
	if (type == ARFS_IPV6_UDP)
		return MLX5E_TT_IPV6_UDP;
	return -EINVAL;
}
/* re-point every ttc rule at its indirection TIR so traffic bypasses the
 * aRFS tables; returns the first modification error, if any
 */
static int arfs_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_tir *tir = priv->indir_tir;
	struct mlx5_flow_destination dest;
	int i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		int tt = arfs_get_tt(i);
		int err;

		dest.tir_num = tir[i].tirn;
		/* Modify ttc rules destination to bypass the aRFS tables*/
		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
						   &dest, NULL);
		if (err) {
			netdev_err(priv->netdev,
				   "%s: modify ttc destination failed\n",
				   __func__);
			return err;
		}
	}
	return 0;
}
/* forward declaration: defined below, needed by mlx5e_arfs_disable() */
static void arfs_del_rules(struct mlx5e_priv *priv);

/* Tear down all installed aRFS rules, then route traffic back to the
 * indirection TIRs so the aRFS tables are bypassed.
 */
int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
	arfs_del_rules(priv);

	return arfs_disable(priv);
}
/* re-point every ttc rule at its aRFS flow table; on failure, roll back
 * by disabling aRFS and return the error
 */
int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	int i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		int tt = arfs_get_tt(i);
		int err;

		dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
		/* Modify ttc rules destination to point on the aRFS FTs */
		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
						   &dest, NULL);
		if (err) {
			netdev_err(priv->netdev,
				   "%s: modify ttc destination failed err=%d\n",
				   __func__, err);
			arfs_disable(priv);
			return err;
		}
	}
	return 0;
}
/* drop the catch-all rule before tearing down the table itself */
static void arfs_destroy_table(struct arfs_table *tbl)
{
	mlx5_del_flow_rules(tbl->default_rule);
	mlx5e_destroy_flow_table(&tbl->ft);
}
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
{
int i;
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return;
arfs_del_rules(priv);
destroy_workqueue(priv->fs.arfs.wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
}
}
static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
struct mlx5_flow_act flow_act = {
.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
.encap_id = 0,
};
struct mlx5_flow_destination dest;
struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_spec *spec;
int err = 0;
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
switch (type) {
case ARFS_IPV4_TCP:
dest.tir_num = tir[MLX5E_TT_IPV4_TCP].tirn;
break;
case ARFS_IPV4_UDP:
dest.tir_num = tir[MLX5E_TT_IPV4_UDP].tirn;
break;
case ARFS_IPV6_TCP:
dest.tir_num = tir[MLX5E_TT_IPV6_TCP].tirn;
break;
case ARFS_IPV6_UDP:
dest.tir_num = tir[MLX5E_TT_IPV6_UDP].tirn;
break;
default:
err = -EINVAL;
goto out;
}
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
&flow_act,
&dest, 1);
if (IS_ERR(arfs_t->default_rule)) {
err = PTR_ERR(arfs_t->default_rule);
arfs_t->default_rule = NULL;
netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
__func__, type);
}
out:
kvfree(spec);
return err;
}
/* group 1 holds the 5-tuple match entries, group 2 the single
 * catch-all (default) entry; the table size is their sum
 */
#define MLX5E_ARFS_NUM_GROUPS	2
#define MLX5E_ARFS_GROUP1_SIZE	BIT(12)
#define MLX5E_ARFS_GROUP2_SIZE	BIT(0)
#define MLX5E_ARFS_TABLE_SIZE	(MLX5E_ARFS_GROUP1_SIZE +\
				 MLX5E_ARFS_GROUP2_SIZE)
/* Create the two flow groups of one aRFS table: a large group whose
 * match criteria cover the full 5-tuple for @type (ethertype, L3
 * addresses, L4 ports) and a one-entry wildcard group for the default
 * (miss) rule.
 * NOTE(review): when mlx5_create_flow_group() fails, groups created so
 * far and the ft->g array are left for the caller's
 * mlx5e_destroy_flow_table() to reclaim - confirm it actually frees
 * them, otherwise this path leaks.
 */
static int arfs_create_groups(struct mlx5e_flow_table *ft,
			      enum  arfs_type type)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	in = mlx5_vzalloc(inlen);
	if  (!in || !ft->g) {
		kvfree(ft->g);
		kvfree(in);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
				       outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
	/* the L4 port fields to match depend on the transport protocol */
	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV6_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
		break;
	case ARFS_IPV4_UDP:
	case ARFS_IPV6_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	/* the L3 address fields to match depend on the IP version */
	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV4_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
		break;
	case ARFS_IPV6_TCP:
	case ARFS_IPV6_UDP:
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		break;
	default:
		err = -EINVAL;
		goto out;
	}

	/* group 1: full 5-tuple match entries */
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* group 2: single wildcard entry for the default rule */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
out:
	kvfree(in);

	return err;
}
/* create one aRFS flow table (for @type), its two groups and its default
 * rule; the table is destroyed again on any partial failure
 */
static int arfs_create_table(struct mlx5e_priv *priv,
			     enum arfs_type type)
{
	struct mlx5e_flow_table *ft = &priv->fs.arfs.arfs_tables[type].ft;
	int err;

	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
				       MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL, 0);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = arfs_create_groups(ft, type);
	if (!err)
		err = arfs_add_default_rule(priv, type);
	if (err)
		mlx5e_destroy_flow_table(ft);
	return err;
}
/* set up the aRFS state: lock, rule list, workqueue and one flow table
 * per aRFS type; everything is torn down again on failure
 */
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
	int i, err;

	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
		return 0;

	spin_lock_init(&priv->fs.arfs.arfs_lock);
	INIT_LIST_HEAD(&priv->fs.arfs.rules);
	priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
	if (!priv->fs.arfs.wq)
		return -ENOMEM;

	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		err = arfs_create_table(priv, i);
		if (err) {
			mlx5e_arfs_destroy_tables(priv);
			return err;
		}
	}
	return 0;
}
/* scan at most this many rules per pass to bound time under the lock */
#define MLX5E_ARFS_EXPIRY_QUOTA 60

/* Collect rules whose flows RPS reports as expired (bounded by the quota
 * above) while holding the spinlock, then delete their hardware rules
 * and free them outside the lock.
 */
static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
	struct arfs_rule *arfs_rule;
	struct hlist_node *htmp;
	int quota = 0;
	int i;
	int j;
	HLIST_HEAD(del_list);

	spin_lock_bh(&priv->fs.arfs.arfs_lock);
	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
		if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
			break;
		/* skip rules whose work is still queued or still in demand */
		if (!work_pending(&arfs_rule->arfs_work) &&
		    rps_may_expire_flow(priv->netdev,
					arfs_rule->rxq, arfs_rule->flow_id,
					arfs_rule->filter_id)) {
			hlist_del_init(&arfs_rule->hlist);
			hlist_add_head(&arfs_rule->hlist, &del_list);
		}
	}
	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
		if (arfs_rule->rule)
			mlx5_del_flow_rules(arfs_rule->rule);
		hlist_del(&arfs_rule->hlist);
		kfree(arfs_rule);
	}
}
/* Remove ALL aRFS rules: unlink everything while holding the spinlock,
 * then cancel any pending work, delete the hardware rules and free the
 * entries outside the lock.
 */
static void arfs_del_rules(struct mlx5e_priv *priv)
{
	struct hlist_node *htmp;
	struct arfs_rule *rule;
	int i;
	int j;
	HLIST_HEAD(del_list);

	spin_lock_bh(&priv->fs.arfs.arfs_lock);
	mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
		hlist_del_init(&rule->hlist);
		hlist_add_head(&rule->hlist, &del_list);
	}
	spin_unlock_bh(&priv->fs.arfs.arfs_lock);

	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
		cancel_work_sync(&rule->arfs_work);
		if (rule->rule)
			mlx5_del_flow_rules(rule->rule);
		hlist_del(&rule->hlist);
		kfree(rule);
	}
}
/* mix both L4 ports into one value and hash it down to a bucket index */
static struct hlist_head *
arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
		 __be16 dst_port)
{
	unsigned long key = (__force unsigned long)src_port |
			    ((__force unsigned long)dst_port << 2);

	return &arfs_t->rules_hash[hash_long(key, ARFS_HASH_SHIFT)];
}
/* return the transport protocol number from an IPv4 or IPv6 packet */
static u8 arfs_get_ip_proto(const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return ip_hdr(skb)->protocol;
	return ipv6_hdr(skb)->nexthdr;
}
/* pick the aRFS table for an (ethertype, ip_proto) pair; NULL when the
 * combination is not a TCP/UDP over IPv4/IPv6 flow
 */
static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
					 u8 ip_proto, __be16 etype)
{
	if (etype == htons(ETH_P_IP)) {
		if (ip_proto == IPPROTO_TCP)
			return &arfs->arfs_tables[ARFS_IPV4_TCP];
		if (ip_proto == IPPROTO_UDP)
			return &arfs->arfs_tables[ARFS_IPV4_UDP];
	} else if (etype == htons(ETH_P_IPV6)) {
		if (ip_proto == IPPROTO_TCP)
			return &arfs->arfs_tables[ARFS_IPV6_TCP];
		if (ip_proto == IPPROTO_UDP)
			return &arfs->arfs_tables[ARFS_IPV6_UDP];
	}
	return NULL;
}
/* Translate @arfs_rule->tuple into an mlx5 flow spec (5-tuple exact
 * match) and install a steering rule that forwards the flow to the TIR
 * of the rule's target RX queue.
 * Returns the new flow handle, or ERR_PTR() on failure.
 */
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
					      struct arfs_rule *arfs_rule)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
		.encap_id = 0,
	};
	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
	struct arfs_tuple *tuple = &arfs_rule->tuple;
	struct mlx5_flow_handle *rule = NULL;
	struct mlx5_flow_destination dest;
	struct arfs_table *arfs_table;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	/* match on the ethertype of the flow */
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.ethertype);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
		 ntohs(tuple->etype));
	arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
	if (!arfs_table) {
		err = -EINVAL;
		goto out;
	}

	ft = arfs_table->ft.t;
	/* match on the L4 ports of the appropriate transport protocol */
	if (tuple->ip_proto == IPPROTO_TCP) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
			 ntohs(tuple->src_port));
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
			 ntohs(tuple->src_port));
	}
	/* match on the IPv4 or IPv6 source/destination addresses */
	if (tuple->etype == htons(ETH_P_IP)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &tuple->src_ipv4,
		       4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &tuple->dst_ipv4,
		       4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &tuple->src_ipv6,
		       16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &tuple->dst_ipv6,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
	}
	/* forward matching packets to the TIR of the rule's RX queue */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
			   __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
	}

out:
	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}
/* Re-target an existing hardware rule to the TIR of a new RX queue.
 * Failures are only logged: the rule keeps its old destination.
 * Fix: corrected the "modfiy" typo in the warning message.
 */
static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
				struct mlx5_flow_handle *rule, u16 rxq)
{
	struct mlx5_flow_destination dst;
	int err = 0;

	dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dst.tir_num = priv->direct_tir[rxq].tirn;
	err = mlx5_modify_rule_destination(rule, &dst, NULL);
	if (err)
		netdev_warn(priv->netdev,
			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
}
/* Workqueue handler for one aRFS rule: install the hardware steering
 * rule (first invocation) or re-target the existing one to the rule's
 * current RX queue. If the netdev was closed meanwhile, the rule is
 * unlinked and freed instead. Every invocation also opportunistically
 * expires stale rules.
 */
static void arfs_handle_work(struct work_struct *work)
{
	struct arfs_rule *arfs_rule = container_of(work,
						   struct arfs_rule,
						   arfs_work);
	struct mlx5e_priv *priv = arfs_rule->priv;
	struct mlx5_flow_handle *rule;

	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		/* device closed: drop the rule instead of programming it */
		spin_lock_bh(&priv->fs.arfs.arfs_lock);
		hlist_del(&arfs_rule->hlist);
		spin_unlock_bh(&priv->fs.arfs.arfs_lock);

		mutex_unlock(&priv->state_lock);
		kfree(arfs_rule);
		goto out;
	}
	mutex_unlock(&priv->state_lock);

	if (!arfs_rule->rule) {
		rule = arfs_add_rule(priv, arfs_rule);
		if (IS_ERR(rule))
			goto out;
		arfs_rule->rule = rule;
	} else {
		/* rule already installed: just move it to the new queue */
		arfs_modify_rule_rq(priv, arfs_rule->rule,
				    arfs_rule->rxq);
	}
out:
	arfs_may_expire_flow(priv);
}
/* return L4 destination port from ip4/6 packets */
static __be16 arfs_get_dst_port(const struct sk_buff *skb)
{
	char *th = skb_transport_header(skb);

	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
		return ((struct tcphdr *)th)->dest;
	return ((struct udphdr *)th)->dest;
}
/* return L4 source port from ip4/6 packets */
static __be16 arfs_get_src_port(const struct sk_buff *skb)
{
	char *th = skb_transport_header(skb);

	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
		return ((struct tcphdr *)th)->source;
	return ((struct udphdr *)th)->source;
}
/* allocate a rule for the flow carried by @skb, fill its 5-tuple and
 * link it into the table's hash; caller holds arfs_lock.
 * Returns NULL on allocation failure.
 */
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
					 struct arfs_table *arfs_t,
					 const struct sk_buff *skb,
					 u16 rxq, u32 flow_id)
{
	struct arfs_rule *rule;
	struct arfs_tuple *t;

	rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
	if (!rule)
		return NULL;

	rule->priv = priv;
	rule->rxq = rxq;
	rule->flow_id = flow_id;
	rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
	INIT_WORK(&rule->arfs_work, arfs_handle_work);

	t = &rule->tuple;
	t->etype = skb->protocol;
	t->ip_proto = arfs_get_ip_proto(skb);
	t->src_port = arfs_get_src_port(skb);
	t->dst_port = arfs_get_dst_port(skb);
	if (t->etype == htons(ETH_P_IP)) {
		t->src_ipv4 = ip_hdr(skb)->saddr;
		t->dst_ipv4 = ip_hdr(skb)->daddr;
	} else {
		memcpy(&t->src_ipv6, &ipv6_hdr(skb)->saddr,
		       sizeof(struct in6_addr));
		memcpy(&t->dst_ipv6, &ipv6_hdr(skb)->daddr,
		       sizeof(struct in6_addr));
	}

	hlist_add_head(&rule->hlist,
		       arfs_hash_bucket(arfs_t, t->src_port,
					t->dst_port));
	return rule;
}
/* true when the L3 addresses of @skb match those cached in @tuple */
static bool arfs_cmp_ips(struct arfs_tuple *tuple,
			 const struct sk_buff *skb)
{
	if (tuple->etype == htons(ETH_P_IP))
		return tuple->src_ipv4 == ip_hdr(skb)->saddr &&
		       tuple->dst_ipv4 == ip_hdr(skb)->daddr;
	if (tuple->etype == htons(ETH_P_IPV6))
		return !memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr)) &&
		       !memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
			       sizeof(struct in6_addr));
	return false;
}
/* look up the rule matching @skb's 5-tuple; caller holds arfs_lock */
static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
					const struct sk_buff *skb)
{
	__be16 sport = arfs_get_src_port(skb);
	__be16 dport = arfs_get_dst_port(skb);
	struct hlist_head *bucket;
	struct arfs_rule *r;

	bucket = arfs_hash_bucket(arfs_t, sport, dport);
	hlist_for_each_entry(r, bucket, hlist) {
		if (r->tuple.src_port == sport &&
		    r->tuple.dst_port == dport &&
		    arfs_cmp_ips(&r->tuple, skb))
			return r;
	}
	return NULL;
}
/* ndo_rx_flow_steer() callback: record (or re-target) an aRFS rule for
 * the flow carried by @skb and queue the work that programs it into
 * hardware. Returns the software filter id or a negative errno.
 */
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			u16 rxq_index, u32 flow_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
	struct arfs_table *arfs_t;
	struct arfs_rule *arfs_rule;

	/* only TCP/UDP over IPv4/IPv6 can be steered */
	if (skb->protocol != htons(ETH_P_IP) &&
	    skb->protocol != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
	if (!arfs_t)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&arfs->arfs_lock);
	arfs_rule = arfs_find_rule(arfs_t, skb);
	if (arfs_rule) {
		if (arfs_rule->rxq == rxq_index) {
			/* already steered to the right queue: nothing to do */
			spin_unlock_bh(&arfs->arfs_lock);
			return arfs_rule->filter_id;
		}
		arfs_rule->rxq = rxq_index;
	} else {
		arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
					    rxq_index, flow_id);
		if (!arfs_rule) {
			spin_unlock_bh(&arfs->arfs_lock);
			return -ENOMEM;
		}
	}
	queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
	spin_unlock_bh(&arfs->arfs_lock);
	/* NOTE(review): arfs_rule->filter_id is read after the lock is
	 * dropped; safe only if the queued work cannot free the rule
	 * before this returns - confirm against arfs_handle_work()
	 */
	return arfs_rule->filter_id;
}
#endif
| gpl-2.0 |
JudsonWilson/CS244_RC3_Kernel | drivers/net/team/team_mode_loadbalance.c | 65 | 17317 | /*
* drivers/net/team/team_mode_loadbalance.c - Load-balancing mode for team
* Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_team.h>
struct lb_priv;

/* Signature of a tx port selector: picks the egress port for an skb
 * from its computed one-byte hash. */
typedef struct team_port *lb_select_tx_port_func_t(struct team *,
						   struct lb_priv *,
						   struct sk_buff *,
						   unsigned char);

#define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */

/* A single byte counter (per hash bucket or per port). */
struct lb_stats {
	u64 tx_bytes;
};

/* Per-CPU tx counters, one per hash bucket, with a u64_stats sync
 * point so 64-bit counters can be read consistently on 32-bit hosts. */
struct lb_pcpu_stats {
	struct lb_stats hash_stats[LB_TX_HASHTABLE_SIZE];
	struct u64_stats_sync syncp;
};

/* Aggregated counters exported through a team option instance. */
struct lb_stats_info {
	struct lb_stats stats;		/* current accumulation */
	struct lb_stats last_stats;	/* previous snapshot, for change detection */
	struct team_option_inst_info *opt_inst_info;
};

/* One entry of the hash -> port mapping table. */
struct lb_port_mapping {
	struct team_port __rcu *port;
	struct team_option_inst_info *opt_inst_info;
};

/* Out-of-line extension of lb_priv (lb_priv itself is embedded in
 * team->mode_priv, so bulky state lives here). */
struct lb_priv_ex {
	struct team *team;
	struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
	struct sock_fprog *orig_fprog;	/* userspace copy of the BPF program */
	struct {
		unsigned int refresh_interval; /* in tenths of second */
		struct delayed_work refresh_dw;
		struct lb_stats_info info[LB_TX_HASHTABLE_SIZE];
	} stats;
};

/* Mode private data, embedded in team->mode_priv. */
struct lb_priv {
	struct sk_filter __rcu *fp;	/* BPF hashing program, may be NULL */
	lb_select_tx_port_func_t __rcu *select_tx_port_func;
	struct lb_pcpu_stats __percpu *pcpu_stats;
	struct lb_priv_ex *ex; /* priv extension */
};
/* Mode private data lives directly inside team->mode_priv; just cast. */
static struct lb_priv *get_lb_priv(struct team *team)
{
	return (struct lb_priv *) &team->mode_priv;
}
/* Per-port private data, embedded in port->mode_priv. */
struct lb_port_priv {
	struct lb_stats __percpu *pcpu_stats;	/* per-CPU tx byte counter */
	struct lb_stats_info stats_info;	/* aggregated for userspace */
};

static struct lb_port_priv *get_lb_port_priv(struct team_port *port)
{
	return (struct lb_port_priv *) &port->mode_priv;
}
/*
 * Accessors for the hash -> port mapping table in the priv extension.
 *
 * Fix: the parameter was previously named "lp_priv" while the expansion
 * referenced "lb_priv", so the macro argument was silently ignored and
 * whatever caller variable happened to be named "lb_priv" was captured
 * instead.  Name the parameter consistently so the argument is actually
 * used, and parenthesize the hash argument for macro hygiene.
 */
#define LB_HTPM_PORT_BY_HASH(lb_priv, hash) \
	(lb_priv)->ex->tx_hash_to_port_mapping[(hash)].port

#define LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) \
	(lb_priv)->ex->tx_hash_to_port_mapping[(hash)].opt_inst_info
/*
 * Drop @port from every bucket of the hash -> port mapping table (used
 * when the port is disabled/removed) and notify userspace about each
 * changed mapping option instance.
 */
static void lb_tx_hash_to_port_mapping_null_port(struct team *team,
						 struct team_port *port)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	bool changed = false;
	int i;

	for (i = 0; i < LB_TX_HASHTABLE_SIZE; i++) {
		struct lb_port_mapping *pm;

		pm = &lb_priv->ex->tx_hash_to_port_mapping[i];
		if (rcu_access_pointer(pm->port) == port) {
			/* NULL is a valid value for readers: the tx path
			 * drops packets for unmapped buckets. */
			RCU_INIT_POINTER(pm->port, NULL);
			team_option_inst_set_change(pm->opt_inst_info);
			changed = true;
		}
	}
	if (changed)
		team_options_change_check(team);
}
/* Basic tx selector: map the hash byte onto the active ports by
 * modular index. */
static struct team_port *lb_hash_select_tx_port(struct team *team,
						struct lb_priv *lb_priv,
						struct sk_buff *skb,
						unsigned char hash)
{
	return team_get_port_by_index_rcu(team,
					  team_num_to_port_index(team, hash));
}
/* Hash to port mapping select tx port */
static struct team_port *lb_htpm_select_tx_port(struct team *team,
						struct lb_priv *lb_priv,
						struct sk_buff *skb,
						unsigned char hash)
{
	/* May return NULL (bucket never mapped, or its port was nulled
	 * out); lb_transmit() then drops the packet. */
	return rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));
}
/* A named tx port selector, selectable via the "lb_tx_method" option. */
struct lb_select_tx_port {
	char *name;
	lb_select_tx_port_func_t *func;
};

static const struct lb_select_tx_port lb_select_tx_port_list[] = {
	{
		.name = "hash",
		.func = lb_hash_select_tx_port,
	},
	{
		.name = "hash_to_port_mapping",
		.func = lb_htpm_select_tx_port,
	},
};

#define LB_SELECT_TX_PORT_LIST_COUNT ARRAY_SIZE(lb_select_tx_port_list)
/* Reverse lookup: map a selector function back to its option name.
 * Returns NULL when the function is not in the table. */
static char *lb_select_tx_port_get_name(lb_select_tx_port_func_t *func)
{
	const struct lb_select_tx_port *entry;
	int idx;

	for (idx = 0; idx < LB_SELECT_TX_PORT_LIST_COUNT; idx++) {
		entry = &lb_select_tx_port_list[idx];
		if (entry->func == func)
			return entry->name;
	}
	return NULL;
}
/* Forward lookup: find the selector function registered under @name.
 * Returns NULL for an unknown name. */
static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name)
{
	const struct lb_select_tx_port *entry;
	int idx;

	for (idx = 0; idx < LB_SELECT_TX_PORT_LIST_COUNT; idx++) {
		entry = &lb_select_tx_port_list[idx];
		if (strcmp(entry->name, name) == 0)
			return entry->func;
	}
	return NULL;
}
/*
 * Run the user-supplied BPF hashing program over @skb and fold the
 * 32-bit result into one byte by XOR-ing its four bytes.  Returns 0
 * when no program is installed.
 *
 * Fix: the cast was "(char *)" while @c is "unsigned char *", an
 * incompatible-pointer-type assignment; cast to the matching type.
 */
static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv,
				    struct sk_buff *skb)
{
	struct sk_filter *fp;
	uint32_t lhash;
	unsigned char *c;

	fp = rcu_dereference_bh(lb_priv->fp);
	if (unlikely(!fp))
		return 0;
	lhash = SK_RUN_FILTER(fp, skb);
	c = (unsigned char *) &lhash;
	return c[0] ^ c[1] ^ c[2] ^ c[3];
}
/*
 * Account @tx_bytes to both the per-port and the per-hash per-CPU
 * counters.  Runs on the tx path: only this-CPU data is touched, and
 * both updates share one u64_stats write section (the team pcpu_stats
 * syncp guards the port counter too).
 */
static void lb_update_tx_stats(unsigned int tx_bytes, struct lb_priv *lb_priv,
			       struct lb_port_priv *lb_port_priv,
			       unsigned char hash)
{
	struct lb_pcpu_stats *pcpu_stats;
	struct lb_stats *port_stats;
	struct lb_stats *hash_stats;

	pcpu_stats = this_cpu_ptr(lb_priv->pcpu_stats);
	port_stats = this_cpu_ptr(lb_port_priv->pcpu_stats);
	hash_stats = &pcpu_stats->hash_stats[hash];
	u64_stats_update_begin(&pcpu_stats->syncp);
	port_stats->tx_bytes += tx_bytes;
	hash_stats->tx_bytes += tx_bytes;
	u64_stats_update_end(&pcpu_stats->syncp);
}
/*
 * Mode transmit hook: hash the skb, pick an egress port through the
 * currently selected selector and transmit.  Returns true when the skb
 * was handed to a port, false when it was dropped (freed here) or the
 * lower-level xmit failed.
 */
static bool lb_transmit(struct team *team, struct sk_buff *skb)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	lb_select_tx_port_func_t *select_tx_port_func;
	struct team_port *port;
	unsigned char hash;
	/* Sample the length before xmit: the skb may be freed by then. */
	unsigned int tx_bytes = skb->len;

	hash = lb_get_skb_hash(lb_priv, skb);
	select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func);
	port = select_tx_port_func(team, lb_priv, skb, hash);
	if (unlikely(!port))
		goto drop;
	if (team_dev_queue_xmit(team, port, skb))
		return false;
	/* Only successful transmissions are accounted. */
	lb_update_tx_stats(tx_bytes, lb_priv, get_lb_port_priv(port), hash);
	return true;

drop:
	dev_kfree_skb_any(skb);
	return false;
}
static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
{
struct lb_priv *lb_priv = get_lb_priv(team);
if (!lb_priv->ex->orig_fprog) {
ctx->data.bin_val.len = 0;
ctx->data.bin_val.ptr = NULL;
return 0;
}
ctx->data.bin_val.len = lb_priv->ex->orig_fprog->len *
sizeof(struct sock_filter);
ctx->data.bin_val.ptr = lb_priv->ex->orig_fprog->filter;
return 0;
}
/*
 * Allocate a struct sock_fprog holding a private copy of the classic
 * BPF program passed in as raw option data.
 *
 * @pfprog:   out parameter, set to the new program on success only
 * @data_len: length of @data in bytes; must be a whole number of
 *            struct sock_filter entries
 * @data:     raw instruction array from the option payload
 *
 * Returns 0 on success, -EINVAL on a misaligned length, -ENOMEM on
 * allocation failure.  (Idiom fix: size the allocation from the
 * pointee, sizeof(*fprog), rather than repeating the type name.)
 */
static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
			  const void *data)
{
	struct sock_fprog *fprog;
	struct sock_filter *filter = (struct sock_filter *) data;

	if (data_len % sizeof(struct sock_filter))
		return -EINVAL;
	fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);
	if (!fprog)
		return -ENOMEM;
	fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
	if (!fprog->filter) {
		kfree(fprog);
		return -ENOMEM;
	}
	fprog->len = data_len / sizeof(struct sock_filter);
	*pfprog = fprog;
	return 0;
}
/* Free a program created by __fprog_create() (instruction array first,
 * then the descriptor itself). */
static void __fprog_destroy(struct sock_fprog *fprog)
{
	kfree(fprog->filter);
	kfree(fprog);
}
/*
 * Install a new BPF hashing program from the "bpf_hash_func" option,
 * or remove the current one when the option is written with zero
 * length.  Called with team->lock held, which serializes setters.
 */
static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	struct sk_filter *fp = NULL;
	struct sk_filter *orig_fp;
	struct sock_fprog *fprog = NULL;
	int err;

	if (ctx->data.bin_val.len) {
		err = __fprog_create(&fprog, ctx->data.bin_val.len,
				     ctx->data.bin_val.ptr);
		if (err)
			return err;
		err = sk_unattached_filter_create(&fp, fprog);
		if (err) {
			__fprog_destroy(fprog);
			return err;
		}
	}
	if (lb_priv->ex->orig_fprog) {
		/* Clear old filter data */
		__fprog_destroy(lb_priv->ex->orig_fprog);
		orig_fp = rcu_dereference_protected(lb_priv->fp,
						lockdep_is_held(&team->lock));
		/* NOTE(review): the old filter is released before the new
		 * pointer is published and without an explicit grace
		 * period here - presumably sk_unattached_filter_destroy()
		 * defers the actual free via RCU; verify against its
		 * implementation. */
		sk_unattached_filter_destroy(orig_fp);
	}

	rcu_assign_pointer(lb_priv->fp, fp);
	lb_priv->ex->orig_fprog = fprog;
	return 0;
}
/* Report the name of the currently selected tx selector.  Called with
 * team->lock held. */
static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	lb_select_tx_port_func_t *cur_func;

	cur_func = rcu_dereference_protected(lb_priv->select_tx_port_func,
					     lockdep_is_held(&team->lock));
	ctx->data.str_val = lb_select_tx_port_get_name(cur_func);
	/* The installed selector always comes from the table. */
	BUG_ON(!ctx->data.str_val);
	return 0;
}
/* Switch the tx selector by name; tx-path readers pick the new
 * function up via RCU. */
static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	lb_select_tx_port_func_t *new_func;

	new_func = lb_select_tx_port_get_func(ctx->data.str_val);
	if (!new_func)
		return -EINVAL;
	rcu_assign_pointer(lb_priv->select_tx_port_func, new_func);
	return 0;
}
/* Remember the option instance backing each hash bucket so later
 * mapping changes can be signalled to userspace. */
static int lb_tx_hash_to_port_mapping_init(struct team *team,
					   struct team_option_inst_info *info)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	unsigned char hash = info->array_index;

	LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info;
	return 0;
}
/* Report which port a hash bucket maps to, by ifindex (0 = unmapped). */
static int lb_tx_hash_to_port_mapping_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	struct team_port *port;
	unsigned char hash = ctx->info->array_index;

	/* NOTE(review): plain read of an __rcu pointer - presumably safe
	 * because option getters run under team->lock; confirm. */
	port = LB_HTPM_PORT_BY_HASH(lb_priv, hash);
	ctx->data.u32_val = port ? port->dev->ifindex : 0;
	return 0;
}
/* Point a hash bucket at the enabled port with the given ifindex.
 * Returns -ENODEV when no enabled port matches. */
static int lb_tx_hash_to_port_mapping_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	struct team_port *port;
	unsigned char hash = ctx->info->array_index;

	list_for_each_entry(port, &team->port_list, list) {
		if (ctx->data.u32_val == port->dev->ifindex &&
		    team_port_enabled(port)) {
			rcu_assign_pointer(LB_HTPM_PORT_BY_HASH(lb_priv, hash),
					   port);
			return 0;
		}
	}
	return -ENODEV;
}
/* Stash the option instance for a hash bucket's stats entry. */
static int lb_hash_stats_init(struct team *team,
			      struct team_option_inst_info *info)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	unsigned char hash = info->array_index;

	lb_priv->ex->stats.info[hash].opt_inst_info = info;
	return 0;
}
/* Export the aggregated per-hash counters as opaque binary data
 * (filled in periodically by lb_stats_refresh()). */
static int lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	unsigned char hash = ctx->info->array_index;

	ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats;
	ctx->data.bin_val.len = sizeof(struct lb_stats);
	return 0;
}
/* Stash the option instance for a port's stats entry. */
static int lb_port_stats_init(struct team *team,
			      struct team_option_inst_info *info)
{
	struct team_port *port = info->port;
	struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

	lb_port_priv->stats_info.opt_inst_info = info;
	return 0;
}
/* Export the aggregated per-port counters as opaque binary data
 * (filled in periodically by lb_stats_refresh()). */
static int lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

	ctx->data.bin_val.ptr = &lb_port_priv->stats_info.stats;
	ctx->data.bin_val.len = sizeof(struct lb_stats);
	return 0;
}
/* Snapshot the current counters into last_stats and restart the
 * accumulation window from zero. */
static void __lb_stats_info_refresh_prepare(struct lb_stats_info *s_info)
{
	s_info->last_stats = s_info->stats;
	memset(&s_info->stats, 0, sizeof(s_info->stats));
}
/* Compare the freshly accumulated counters against the previous
 * snapshot; flag the option instance as changed when they differ.
 * Returns true on change. */
static bool __lb_stats_info_refresh_check(struct lb_stats_info *s_info,
					  struct team *team)
{
	if (!memcmp(&s_info->last_stats, &s_info->stats,
		    sizeof(struct lb_stats)))
		return false;
	team_option_inst_set_change(s_info->opt_inst_info);
	return true;
}
/*
 * Fold one CPU's counter into the accumulator.  The read is retried
 * under the u64_stats sequence counter so 32-bit platforms obtain a
 * torn-free view of the 64-bit value.
 */
static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
				   struct lb_stats *cpu_stats,
				   struct u64_stats_sync *syncp)
{
	unsigned int start;
	struct lb_stats tmp;

	do {
		start = u64_stats_fetch_begin_irq(syncp);
		tmp.tx_bytes = cpu_stats->tx_bytes;
	} while (u64_stats_fetch_retry_irq(syncp, start));
	acc_stats->tx_bytes += tmp.tx_bytes;
}
/*
 * Delayed work: fold the per-CPU counters into the per-hash and
 * per-port stats_info accumulators, notify userspace about any changed
 * option instances, and re-arm itself according to refresh_interval
 * (tenths of a second).  team->lock is taken with trylock so this work
 * never deadlocks against contexts that hold the lock while waiting on
 * the workqueue; on contention the work simply reschedules itself.
 */
static void lb_stats_refresh(struct work_struct *work)
{
	struct team *team;
	struct lb_priv *lb_priv;
	struct lb_priv_ex *lb_priv_ex;
	struct lb_pcpu_stats *pcpu_stats;
	struct lb_stats *stats;
	struct lb_stats_info *s_info;
	struct team_port *port;
	bool changed = false;
	int i;
	int j;

	lb_priv_ex = container_of(work, struct lb_priv_ex,
				  stats.refresh_dw.work);

	team = lb_priv_ex->team;
	lb_priv = get_lb_priv(team);

	if (!mutex_trylock(&team->lock)) {
		schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0);
		return;
	}

	/* Per-hash-bucket statistics. */
	for (j = 0; j < LB_TX_HASHTABLE_SIZE; j++) {
		s_info = &lb_priv->ex->stats.info[j];
		__lb_stats_info_refresh_prepare(s_info);
		for_each_possible_cpu(i) {
			pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
			stats = &pcpu_stats->hash_stats[j];
			__lb_one_cpu_stats_add(&s_info->stats, stats,
					       &pcpu_stats->syncp);
		}
		changed |= __lb_stats_info_refresh_check(s_info, team);
	}

	/* Per-port statistics; the team-level syncp also guards the
	 * per-port counters (see lb_update_tx_stats()). */
	list_for_each_entry(port, &team->port_list, list) {
		struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

		s_info = &lb_port_priv->stats_info;
		__lb_stats_info_refresh_prepare(s_info);
		for_each_possible_cpu(i) {
			pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
			stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i);
			__lb_one_cpu_stats_add(&s_info->stats, stats,
					       &pcpu_stats->syncp);
		}
		changed |= __lb_stats_info_refresh_check(s_info, team);
	}

	if (changed)
		team_options_change_check(team);

	schedule_delayed_work(&lb_priv_ex->stats.refresh_dw,
			      (lb_priv_ex->stats.refresh_interval * HZ) / 10);

	mutex_unlock(&team->lock);
}
static int lb_stats_refresh_interval_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
struct lb_priv *lb_priv = get_lb_priv(team);
ctx->data.u32_val = lb_priv->ex->stats.refresh_interval;
return 0;
}
/* Change the stats refresh interval (tenths of a second).  A non-zero
 * value kicks the refresh work immediately; zero cancels it. */
static int lb_stats_refresh_interval_set(struct team *team,
					 struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	unsigned int new_interval = ctx->data.u32_val;

	if (lb_priv->ex->stats.refresh_interval == new_interval)
		return 0;
	lb_priv->ex->stats.refresh_interval = new_interval;
	if (!new_interval)
		cancel_delayed_work(&lb_priv->ex->stats.refresh_dw);
	else
		schedule_delayed_work(&lb_priv->ex->stats.refresh_dw, 0);
	return 0;
}
/* Userspace-visible options of the loadbalance mode, exported through
 * the team options mechanism. */
static const struct team_option lb_options[] = {
	{
		/* Classic BPF program used to hash outgoing skbs. */
		.name = "bpf_hash_func",
		.type = TEAM_OPTION_TYPE_BINARY,
		.getter = lb_bpf_func_get,
		.setter = lb_bpf_func_set,
	},
	{
		/* Selector name: "hash" or "hash_to_port_mapping". */
		.name = "lb_tx_method",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = lb_tx_method_get,
		.setter = lb_tx_method_set,
	},
	{
		/* Per-hash port ifindex, one option instance per bucket. */
		.name = "lb_tx_hash_to_port_mapping",
		.array_size = LB_TX_HASHTABLE_SIZE,
		.type = TEAM_OPTION_TYPE_U32,
		.init = lb_tx_hash_to_port_mapping_init,
		.getter = lb_tx_hash_to_port_mapping_get,
		.setter = lb_tx_hash_to_port_mapping_set,
	},
	{
		/* Read-only per-hash tx byte counters. */
		.name = "lb_hash_stats",
		.array_size = LB_TX_HASHTABLE_SIZE,
		.type = TEAM_OPTION_TYPE_BINARY,
		.init = lb_hash_stats_init,
		.getter = lb_hash_stats_get,
	},
	{
		/* Read-only per-port tx byte counters. */
		.name = "lb_port_stats",
		.per_port = true,
		.type = TEAM_OPTION_TYPE_BINARY,
		.init = lb_port_stats_init,
		.getter = lb_port_stats_get,
	},
	{
		/* Stats refresh period in tenths of a second (0 = off). */
		.name = "lb_stats_refresh_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = lb_stats_refresh_interval_get,
		.setter = lb_stats_refresh_interval_set,
	},
};
/*
 * Mode init: install the default "hash" tx selector, allocate the
 * out-of-line priv extension and the per-CPU statistics, then register
 * the mode options.  Allocations are unwound on failure via the goto
 * ladder below.
 */
static int lb_init(struct team *team)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	lb_select_tx_port_func_t *func;
	int i, err;

	/* set default tx port selector */
	func = lb_select_tx_port_get_func("hash");
	BUG_ON(!func);	/* "hash" is always in lb_select_tx_port_list */
	rcu_assign_pointer(lb_priv->select_tx_port_func, func);

	lb_priv->ex = kzalloc(sizeof(*lb_priv->ex), GFP_KERNEL);
	if (!lb_priv->ex)
		return -ENOMEM;
	lb_priv->ex->team = team;

	lb_priv->pcpu_stats = alloc_percpu(struct lb_pcpu_stats);
	if (!lb_priv->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_pcpu_stats;
	}

	for_each_possible_cpu(i) {
		struct lb_pcpu_stats *team_lb_stats;
		team_lb_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
		u64_stats_init(&team_lb_stats->syncp);
	}

	INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh);

	err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options));
	if (err)
		goto err_options_register;
	return 0;

err_options_register:
	free_percpu(lb_priv->pcpu_stats);
err_alloc_pcpu_stats:
	kfree(lb_priv->ex);
	return err;
}
/*
 * Mode teardown: unregister the options first, then synchronously stop
 * the stats refresh work before freeing the data it touches.
 */
static void lb_exit(struct team *team)
{
	struct lb_priv *lb_priv = get_lb_priv(team);

	team_options_unregister(team, lb_options,
				ARRAY_SIZE(lb_options));
	cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
	free_percpu(lb_priv->pcpu_stats);
	kfree(lb_priv->ex);
}
/* Port attach: allocate the port's per-CPU tx statistics. */
static int lb_port_enter(struct team *team, struct team_port *port)
{
	struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

	lb_port_priv->pcpu_stats = alloc_percpu(struct lb_stats);
	return lb_port_priv->pcpu_stats ? 0 : -ENOMEM;
}
/* Port detach: release what lb_port_enter() allocated. */
static void lb_port_leave(struct team *team, struct team_port *port)
{
	struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

	free_percpu(lb_port_priv->pcpu_stats);
}
/* A disabled port must no longer be a target of the hash -> port
 * mapping table. */
static void lb_port_disabled(struct team *team, struct team_port *port)
{
	lb_tx_hash_to_port_mapping_null_port(team, port);
}
/* Callbacks wiring this mode into the team core. */
static const struct team_mode_ops lb_mode_ops = {
	.init = lb_init,
	.exit = lb_exit,
	.port_enter = lb_port_enter,
	.port_leave = lb_port_leave,
	.port_disabled = lb_port_disabled,
	.transmit = lb_transmit,
};
/* Mode registration record; the priv sizes tell the team core how much
 * room to reserve in team->mode_priv and port->mode_priv. */
static const struct team_mode lb_mode = {
	.kind = "loadbalance",
	.owner = THIS_MODULE,
	.priv_size = sizeof(struct lb_priv),
	.port_priv_size = sizeof(struct lb_port_priv),
	.ops = &lb_mode_ops,
};
/* Module entry: register the "loadbalance" mode with the team core. */
static int __init lb_init_module(void)
{
	return team_mode_register(&lb_mode);
}
/* Module exit: unregister the mode. */
static void __exit lb_cleanup_module(void)
{
	team_mode_unregister(&lb_mode);
}
module_init(lb_init_module);
module_exit(lb_cleanup_module);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Load-balancing mode for team");
MODULE_ALIAS("team-mode-loadbalance");
| gpl-2.0 |
janarthananfit/android_kernel_msm_beni | net/sctp/ulpevent.c | 1089 | 30343 | /* SCTP kernel implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
* These functions manipulate an sctp event. The struct ulpevent is used
* to carry notifications and data to the ULP (sockets).
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Ardelle Fan <ardelle.fan@intel.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
struct sctp_association *asoc);
static void sctp_ulpevent_release_data(struct sctp_ulpevent *event);
static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event);
/* Initialize an ULP event from an given skb: zero the whole event,
 * then record the message flags and the receive-memory charge (@len,
 * the skb's truesize) used later for rmem accounting. */
SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event,
				    int msg_flags,
				    unsigned int len)
{
	memset(event, 0, sizeof(struct sctp_ulpevent));
	event->msg_flags = msg_flags;
	event->rmem_len = len;
}
/* Create a new sctp_ulpevent: allocate an skb of @size bytes and
 * initialize the event embedded in its control block.  Returns NULL
 * on allocation failure. */
SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags,
						    gfp_t gfp)
{
	struct sk_buff *new_skb = alloc_skb(size, gfp);
	struct sctp_ulpevent *event;

	if (!new_skb)
		return NULL;

	event = sctp_skb2event(new_skb);
	sctp_ulpevent_init(event, msg_flags, new_skb->truesize);
	return event;
}
/* Is this a MSG_NOTIFICATION? */
int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event)
{
	return (event->msg_flags & MSG_NOTIFICATION) == MSG_NOTIFICATION;
}
/* Hold the association in case the msg_name needs read out of
 * the association.  Also charges the event's memory against the
 * association's rmem accounting and the socket's receive queue.
 */
static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
					   const struct sctp_association *asoc)
{
	struct sk_buff *skb;

	/* Cast away the const, as we are just wanting to
	 * bump the reference count.
	 */
	sctp_association_hold((struct sctp_association *)asoc);
	skb = sctp_event2skb(event);
	event->asoc = (struct sctp_association *)asoc;
	/* Charge rmem_len (recorded at init time) to the association. */
	atomic_add(event->rmem_len, &event->asoc->rmem_alloc);
	sctp_skb_set_owner_r(skb, asoc->base.sk);
}
/* A simple destructor to give up the reference to the association.
 * Undoes sctp_ulpevent_set_owner(): uncharges the rmem accounting and
 * drops the association hold (which may free the association). */
static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = event->asoc;

	atomic_sub(event->rmem_len, &asoc->rmem_alloc);
	sctp_association_put(asoc);
}
/* Create and initialize an SCTP_ASSOC_CHANGE event.
*
* 5.3.1.1 SCTP_ASSOC_CHANGE
*
* Communication notifications inform the ULP that an SCTP association
* has either begun or ended. The identifier for a new association is
* provided by this notification.
*
* Note: There is no field checking here. If a field is unused it will be
* zero'd out.
*/
struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
	const struct sctp_association *asoc,
	__u16 flags, __u16 state, __u16 error, __u16 outbound,
	__u16 inbound, struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_assoc_change *sac;
	struct sk_buff *skb;

	/* If the lower layer passed in the chunk, it will be
	 * an ABORT, so we need to include it in the sac_info.
	 */
	if (chunk) {
		/* Copy the chunk data to a new skb and reserve enough
		 * head room to use as notification.
		 */
		skb = skb_copy_expand(chunk->skb,
				      sizeof(struct sctp_assoc_change), 0, gfp);

		if (!skb)
			goto fail;

		/* Embed the event fields inside the cloned skb.  */
		event = sctp_skb2event(skb);
		sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);

		/* Include the notification structure */
		sac = (struct sctp_assoc_change *)
			skb_push(skb, sizeof(struct sctp_assoc_change));

		/* Trim the buffer to the right length: notification header
		 * plus the ABORT chunk payload (chunk length minus the
		 * chunk header itself).
		 */
		skb_trim(skb, sizeof(struct sctp_assoc_change) +
			 ntohs(chunk->chunk_hdr->length) -
			 sizeof(sctp_chunkhdr_t));
	} else {
		/* No chunk: a plain fixed-size notification suffices. */
		event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
					  MSG_NOTIFICATION, gfp);
		if (!event)
			goto fail;

		skb = sctp_event2skb(event);
		sac = (struct sctp_assoc_change *) skb_put(skb,
					sizeof(struct sctp_assoc_change));
	}

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_type:
	 * It should be SCTP_ASSOC_CHANGE.
	 */
	sac->sac_type = SCTP_ASSOC_CHANGE;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_state: 32 bits (signed integer)
	 * This field holds one of a number of values that communicate the
	 * event that happened to the association.
	 */
	sac->sac_state = state;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_flags: 16 bits (unsigned integer)
	 * Currently unused.
	 */
	sac->sac_flags = 0;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_length: sizeof (__u32)
	 * This field is the total length of the notification data, including
	 * the notification header.
	 */
	sac->sac_length = skb->len;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_error:  32 bits (signed integer)
	 *
	 * If the state was reached due to a error condition (e.g.
	 * COMMUNICATION_LOST) any relevant error information is available in
	 * this field. This corresponds to the protocol error codes defined in
	 * [SCTP].
	 */
	sac->sac_error = error;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_outbound_streams:  16 bits (unsigned integer)
	 * sac_inbound_streams:  16 bits (unsigned integer)
	 *
	 * The maximum number of streams allowed in each direction are
	 * available in sac_outbound_streams and sac_inbound streams.
	 */
	sac->sac_outbound_streams = outbound;
	sac->sac_inbound_streams = inbound;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * sac_assoc_id: sizeof (sctp_assoc_t)
	 *
	 * The association id field, holds the identifier for the association.
	 * All notifications for a given association have the same association
	 * identifier.  For TCP style socket, this field is ignored.
	 */
	sctp_ulpevent_set_owner(event, asoc);
	sac->sac_assoc_id = sctp_assoc2id(asoc);

	return event;

fail:
	return NULL;
}
/* Create and initialize an SCTP_PEER_ADDR_CHANGE event.
*
* Socket Extensions for SCTP - draft-01
* 5.3.1.2 SCTP_PEER_ADDR_CHANGE
*
* When a destination address on a multi-homed peer encounters a change
* an interface details event is sent.
*/
struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
	const struct sctp_association *asoc,
	const struct sockaddr_storage *aaddr,
	int flags, int state, int error, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_paddr_change *spc;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(struct sctp_paddr_change),
				  MSG_NOTIFICATION, gfp);
	if (!event)
		goto fail;

	skb = sctp_event2skb(event);
	spc = (struct sctp_paddr_change *)
		skb_put(skb, sizeof(struct sctp_paddr_change));

	/* Sockets API Extensions for SCTP
	 * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
	 *
	 * spc_type:
	 *
	 *    It should be SCTP_PEER_ADDR_CHANGE.
	 */
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;

	/* Sockets API Extensions for SCTP
	 * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
	 *
	 * spc_length: sizeof (__u32)
	 *
	 * This field is the total length of the notification data, including
	 * the notification header.
	 */
	spc->spc_length = sizeof(struct sctp_paddr_change);

	/* Sockets API Extensions for SCTP
	 * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
	 *
	 * spc_flags: 16 bits (unsigned integer)
	 * Currently unused.
	 */
	spc->spc_flags = 0;

	/* Sockets API Extensions for SCTP
	 * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
	 *
	 * spc_state:  32 bits (signed integer)
	 *
	 * This field holds one of a number of values that communicate the
	 * event that happened to the address.
	 */
	spc->spc_state = state;

	/* Sockets API Extensions for SCTP
	 * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
	 *
	 * spc_error:  32 bits (signed integer)
	 *
	 * If the state was reached due to any error condition (e.g.
	 * ADDRESS_UNREACHABLE) any relevant error information is available in
	 * this field.
	 */
	spc->spc_error = error;

	/* Socket Extensions for SCTP
	 * 5.3.1.1 SCTP_ASSOC_CHANGE
	 *
	 * spc_assoc_id: sizeof (sctp_assoc_t)
	 *
	 * The association id field, holds the identifier for the association.
	 * All notifications for a given association have the same association
	 * identifier.  For TCP style socket, this field is ignored.
	 */
	sctp_ulpevent_set_owner(event, asoc);
	spc->spc_assoc_id = sctp_assoc2id(asoc);

	/* Sockets API Extensions for SCTP
	 * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE
	 *
	 * spc_aaddr: sizeof (struct sockaddr_storage)
	 *
	 * The affected address field, holds the remote peer's address that is
	 * encountering the change of state.
	 */
	memcpy(&spc->spc_aaddr, aaddr, sizeof(struct sockaddr_storage));

	/* Map ipv4 address into v4-mapped-on-v6 address, so a v6 socket
	 * sees a consistent address family in the notification. */
	sctp_get_pf_specific(asoc->base.sk->sk_family)->addr_v4map(
					sctp_sk(asoc->base.sk),
					(union sctp_addr *)&spc->spc_aaddr);

	return event;

fail:
	return NULL;
}
/* Create and initialize an SCTP_REMOTE_ERROR notification.
*
* Note: This assumes that the chunk->skb->data already points to the
* operation error payload.
*
* Socket Extensions for SCTP - draft-01
* 5.3.1.3 SCTP_REMOTE_ERROR
*
* A remote peer may send an Operational Error message to its peer.
* This message indicates a variety of error conditions on an
* association. The entire error TLV as it appears on the wire is
* included in a SCTP_REMOTE_ERROR event. Please refer to the SCTP
* specification [SCTP] and any extensions for a list of possible
* error formats.
*/
struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
	const struct sctp_association *asoc, struct sctp_chunk *chunk,
	__u16 flags, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_remote_error *sre;
	struct sk_buff *skb;
	sctp_errhdr_t *ch;
	__be16 cause;
	int elen;

	ch = (sctp_errhdr_t *)(chunk->skb->data);
	cause = ch->cause;
	/* Payload length of this cause TLV, padded to a word boundary,
	 * minus the error header itself. */
	elen = WORD_ROUND(ntohs(ch->length)) - sizeof(sctp_errhdr_t);

	/* Pull off the ERROR header.  */
	skb_pull(chunk->skb, sizeof(sctp_errhdr_t));

	/* Copy the skb to a new skb with room for us to prepend
	 * notification with.
	 */
	skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
			      0, gfp);

	/* Pull off the rest of the cause TLV from the chunk.
	 * Note: done before the NULL check on purpose - chunk parsing
	 * must advance past this TLV even when the copy failed. */
	skb_pull(chunk->skb, elen);
	if (!skb)
		goto fail;

	/* Embed the event fields inside the cloned skb.  */
	event = sctp_skb2event(skb);
	sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);

	sre = (struct sctp_remote_error *)
		skb_push(skb, sizeof(struct sctp_remote_error));

	/* Trim the buffer to the right length.  */
	skb_trim(skb, sizeof(struct sctp_remote_error) + elen);

	/* Socket Extensions for SCTP
	 * 5.3.1.3 SCTP_REMOTE_ERROR
	 *
	 * sre_type:
	 *   It should be SCTP_REMOTE_ERROR.
	 */
	sre->sre_type = SCTP_REMOTE_ERROR;

	/*
	 * Socket Extensions for SCTP
	 * 5.3.1.3 SCTP_REMOTE_ERROR
	 *
	 * sre_flags: 16 bits (unsigned integer)
	 *   Currently unused.
	 */
	sre->sre_flags = 0;

	/* Socket Extensions for SCTP
	 * 5.3.1.3 SCTP_REMOTE_ERROR
	 *
	 * sre_length: sizeof (__u32)
	 *
	 * This field is the total length of the notification data,
	 * including the notification header.
	 */
	sre->sre_length = skb->len;

	/* Socket Extensions for SCTP
	 * 5.3.1.3 SCTP_REMOTE_ERROR
	 *
	 * sre_error: 16 bits (unsigned integer)
	 * This value represents one of the Operational Error causes defined in
	 * the SCTP specification, in network byte order.
	 */
	sre->sre_error = cause;

	/* Socket Extensions for SCTP
	 * 5.3.1.3 SCTP_REMOTE_ERROR
	 *
	 * sre_assoc_id: sizeof (sctp_assoc_t)
	 *
	 * The association id field, holds the identifier for the association.
	 * All notifications for a given association have the same association
	 * identifier.  For TCP style socket, this field is ignored.
	 */
	sctp_ulpevent_set_owner(event, asoc);
	sre->sre_assoc_id = sctp_assoc2id(asoc);

	return event;

fail:
	return NULL;
}
/* Create and initialize a SCTP_SEND_FAILED notification.
*
* Socket Extensions for SCTP - draft-01
* 5.3.1.4 SCTP_SEND_FAILED
*/
struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
	const struct sctp_association *asoc, struct sctp_chunk *chunk,
	__u16 flags, __u32 error, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_send_failed *ssf;
	struct sk_buff *skb;

	/* Pull off any padding. */
	int len = ntohs(chunk->chunk_hdr->length);

	/* Make skb with more room so we can prepend notification.  */
	skb = skb_copy_expand(chunk->skb,
			      sizeof(struct sctp_send_failed), /* headroom */
			      0, /* tailroom */
			      gfp);
	if (!skb)
		goto fail;

	/* Pull off the common chunk header and DATA header so only the
	 * user payload remains in the copy. */
	skb_pull(skb, sizeof(struct sctp_data_chunk));
	len -= sizeof(struct sctp_data_chunk);

	/* Embed the event fields inside the cloned skb.  */
	event = sctp_skb2event(skb);
	sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);

	ssf = (struct sctp_send_failed *)
		skb_push(skb, sizeof(struct sctp_send_failed));

	/* Socket Extensions for SCTP
	 * 5.3.1.4 SCTP_SEND_FAILED
	 *
	 * ssf_type:
	 * It should be SCTP_SEND_FAILED.
	 */
	ssf->ssf_type = SCTP_SEND_FAILED;

	/* Socket Extensions for SCTP
	 * 5.3.1.4 SCTP_SEND_FAILED
	 *
	 * ssf_flags: 16 bits (unsigned integer)
	 * The flag value will take one of the following values
	 *
	 * SCTP_DATA_UNSENT - Indicates that the data was never put on
	 *                    the wire.
	 *
	 * SCTP_DATA_SENT   - Indicates that the data was put on the wire.
	 *                    Note that this does not necessarily mean that the
	 *                    data was (or was not) successfully delivered.
	 */
	ssf->ssf_flags = flags;

	/* Socket Extensions for SCTP
	 * 5.3.1.4 SCTP_SEND_FAILED
	 *
	 * ssf_length: sizeof (__u32)
	 * This field is the total length of the notification data, including
	 * the notification header.
	 */
	ssf->ssf_length = sizeof(struct sctp_send_failed) + len;
	skb_trim(skb, ssf->ssf_length);

	/* Socket Extensions for SCTP
	 * 5.3.1.4 SCTP_SEND_FAILED
	 *
	 * ssf_error: 16 bits (unsigned integer)
	 * This value represents the reason why the send failed, and if set,
	 * will be a SCTP protocol error code as defined in [SCTP] section
	 * 3.3.10.
	 */
	ssf->ssf_error = error;

	/* Socket Extensions for SCTP
	 * 5.3.1.4 SCTP_SEND_FAILED
	 *
	 * ssf_info: sizeof (struct sctp_sndrcvinfo)
	 * The original send information associated with the undelivered
	 * message.
	 */
	memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo));

	/* Per TSVWG discussion with Randy. Allow the application to
	 * ressemble a fragmented message.
	 */
	ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags;

	/* Socket Extensions for SCTP
	 * 5.3.1.4 SCTP_SEND_FAILED
	 *
	 * ssf_assoc_id: sizeof (sctp_assoc_t)
	 * The association id field, sf_assoc_id, holds the identifier for the
	 * association.  All notifications for a given association have the
	 * same association identifier.  For TCP style socket, this field is
	 * ignored.
	 */
	sctp_ulpevent_set_owner(event, asoc);
	ssf->ssf_assoc_id = sctp_assoc2id(asoc);
	return event;

fail:
	return NULL;
}
/* Build a SCTP_SHUTDOWN_EVENT notification (Sockets API Extensions for
 * SCTP, draft-01, section 5.3.1.5).
 *
 * Returns a new ulpevent carrying a struct sctp_shutdown_event, or
 * NULL if the allocation fails.
 */
struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
	const struct sctp_association *asoc,
	__u16 flags, gfp_t gfp)
{
	struct sctp_shutdown_event *notif;
	struct sctp_ulpevent *event;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(*notif), MSG_NOTIFICATION, gfp);
	if (!event)
		return NULL;

	skb = sctp_event2skb(event);
	notif = (struct sctp_shutdown_event *)skb_put(skb, sizeof(*notif));

	/* Fixed notification header (5.3.1.5): sse_type identifies the
	 * event, sse_flags is currently unused, and sse_length is the
	 * total length of the notification including this header.
	 */
	notif->sse_type = SCTP_SHUTDOWN_EVENT;
	notif->sse_flags = 0;
	notif->sse_length = sizeof(*notif);

	/* Take a reference on the association before publishing its id;
	 * all notifications for one association share the same id
	 * (ignored for TCP-style sockets).
	 */
	sctp_ulpevent_set_owner(event, asoc);
	notif->sse_assoc_id = sctp_assoc2id(asoc);

	return event;
}
/* Build a SCTP_ADAPTATION_INDICATION notification carrying the peer's
 * adaptation layer indication (Sockets API Extensions for SCTP,
 * section 5.3.1.6). Returns NULL on allocation failure.
 */
struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
	const struct sctp_association *asoc, gfp_t gfp)
{
	struct sctp_adaptation_event *notif;
	struct sctp_ulpevent *event;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(*notif), MSG_NOTIFICATION, gfp);
	if (!event)
		return NULL;

	skb = sctp_event2skb(event);
	notif = (struct sctp_adaptation_event *)skb_put(skb, sizeof(*notif));

	notif->sai_type = SCTP_ADAPTATION_INDICATION;
	notif->sai_flags = 0;
	notif->sai_length = sizeof(*notif);
	/* Echo the adaptation indication received from the peer. */
	notif->sai_adaptation_ind = asoc->peer.adaptation_ind;

	/* Hold the association and publish its identifier. */
	sctp_ulpevent_set_owner(event, asoc);
	notif->sai_assoc_id = sctp_assoc2id(asoc);

	return event;
}
/* A message has been received.  Package this message as a notification
 * to pass it to the upper layers.  Go ahead and calculate the sndrcvinfo
 * even if filtered out later.
 *
 * Socket Extensions for SCTP
 * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
 *
 * Returns a ulpevent embedded in a clone of the chunk's skb, or NULL
 * if receive-buffer limits are exceeded or an allocation fails.
 */
struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
						struct sctp_chunk *chunk,
						gfp_t gfp)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff *skb;
	size_t padding, len;
	int rx_count;

	/*
	 * check to see if we need to make space for this
	 * new skb, expand the rcvbuffer if needed, or drop
	 * the frame
	 */
	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* Over the receive-buffer limit: only accept when the buffer is
	 * not user-locked and the socket can still schedule the memory.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf) {
		if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
		    (!sk_rmem_schedule(asoc->base.sk, chunk->skb->truesize)))
			goto fail;
	}

	/* Clone the original skb, sharing the data. */
	skb = skb_clone(chunk->skb, gfp);
	if (!skb)
		goto fail;

	/* Now that all memory allocations for this chunk succeeded, we
	 * can mark it as received so the tsn_map is updated correctly.
	 */
	if (sctp_tsnmap_mark(&asoc->peer.tsn_map,
			     ntohl(chunk->subh.data_hdr->tsn)))
		goto fail_mark;

	/* First calculate the padding, so we don't inadvertently
	 * pass up the wrong length to the user.
	 *
	 * RFC 2960 - Section 3.2 Chunk Field Descriptions
	 *
	 * The total length of a chunk(including Type, Length and Value fields)
	 * MUST be a multiple of 4 bytes. If the length of the chunk is not a
	 * multiple of 4 bytes, the sender MUST pad the chunk with all zero
	 * bytes and this padding is not included in the chunk length field.
	 * The sender should never pad with more than 3 bytes. The receiver
	 * MUST ignore the padding bytes.
	 */
	len = ntohs(chunk->chunk_hdr->length);
	padding = WORD_ROUND(len) - len;

	/* Fixup cloned skb with just this chunks data. */
	skb_trim(skb, chunk->chunk_end - padding - skb->data);

	/* Embed the event fields inside the cloned skb. */
	event = sctp_skb2event(skb);

	/* Initialize event with flags 0 and correct length
	 * Since this is a clone of the original skb, only account for
	 * the data of this chunk as other chunks will be accounted separately.
	 */
	sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff));

	/* Charge rwnd and take an association reference (recursively for
	 * any skb fragments).
	 */
	sctp_ulpevent_receive_data(event, asoc);

	/* Record per-chunk delivery metadata later exported via
	 * sctp_ulpevent_read_sndrcvinfo().
	 */
	event->stream = ntohs(chunk->subh.data_hdr->stream);
	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
	event->ppid = chunk->subh.data_hdr->ppid;
	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
		event->flags |= SCTP_UNORDERED;
		/* Cumulative TSN is only reported for unordered data. */
		event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	}
	event->tsn = ntohl(chunk->subh.data_hdr->tsn);
	event->msg_flags |= chunk->chunk_hdr->flags;
	event->iif = sctp_chunk_iif(chunk);

	return event;

fail_mark:
	kfree_skb(skb);
fail:
	return NULL;
}
/* Build a SCTP_PARTIAL_DELIVERY_EVENT notification (section 5.3.1.7).
 * Used to report progress while the receiver is engaged in partial
 * delivery of a message. Returns NULL on allocation failure.
 */
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
	const struct sctp_association *asoc, __u32 indication,
	gfp_t gfp)
{
	struct sctp_pdapi_event *notif;
	struct sctp_ulpevent *event;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(*notif), MSG_NOTIFICATION, gfp);
	if (!event)
		return NULL;

	skb = sctp_event2skb(event);
	notif = (struct sctp_pdapi_event *)skb_put(skb, sizeof(*notif));

	/* Fixed header: event type, unused flags, total length. */
	notif->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	notif->pdapi_flags = 0;
	notif->pdapi_length = sizeof(*notif);
	/* The indication being delivered to the application. */
	notif->pdapi_indication = indication;

	/* Hold the association and publish its identifier. */
	sctp_ulpevent_set_owner(event, asoc);
	notif->pdapi_assoc_id = sctp_assoc2id(asoc);

	return event;
}
/* Build a SCTP_AUTHENTICATION_INDICATION notification reporting an
 * AUTH key event for key number key_id. Returns NULL on allocation
 * failure.
 */
struct sctp_ulpevent *sctp_ulpevent_make_authkey(
	const struct sctp_association *asoc, __u16 key_id,
	__u32 indication, gfp_t gfp)
{
	struct sctp_authkey_event *notif;
	struct sctp_ulpevent *event;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(*notif), MSG_NOTIFICATION, gfp);
	if (!event)
		return NULL;

	skb = sctp_event2skb(event);
	notif = (struct sctp_authkey_event *)skb_put(skb, sizeof(*notif));

	notif->auth_type = SCTP_AUTHENTICATION_INDICATION;
	notif->auth_flags = 0;
	notif->auth_length = sizeof(*notif);
	notif->auth_keynumber = key_id;
	notif->auth_altkeynumber = 0;
	notif->auth_indication = indication;

	/* Hold the association and publish its identifier. */
	sctp_ulpevent_set_owner(event, asoc);
	notif->auth_assoc_id = sctp_assoc2id(asoc);

	return event;
}
/* Return the notification type, assuming this is a notification
* event.
*/
__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
{
union sctp_notification *notification;
struct sk_buff *skb;
skb = sctp_event2skb(event);
notification = (union sctp_notification *) skb->data;
return notification->sn_header.sn_type;
}
/* Copy the receive-side sndrcvinfo of a data event into a msghdr as a
 * SCTP_SNDRCV control message (Sockets API Extensions for SCTP,
 * section 5.2.2). Notifications carry no sndrcvinfo and are skipped.
 */
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
				   struct msghdr *msghdr)
{
	struct sctp_sndrcvinfo sinfo;

	if (sctp_ulpevent_is_notification(event))
		return;

	/* Stream number and stream sequence number of the DATA chunk
	 * (the SSN is the same for every delivery of a fragmented
	 * message).
	 */
	sinfo.sinfo_stream = event->stream;
	sinfo.sinfo_ssn = event->ssn;
	/* Payload protocol id, passed opaquely end-to-end: the stack
	 * performs no byte-order conversion on it.
	 */
	sinfo.sinfo_ppid = event->ppid;
	/* Receive flags, e.g. SCTP_UNORDERED for unordered delivery. */
	sinfo.sinfo_flags = event->flags;
	/* TSN assigned to the chunk, plus the cumulative TSN; the
	 * latter is only valid together with SCTP_UNORDERED.
	 */
	sinfo.sinfo_tsn = event->tsn;
	sinfo.sinfo_cumtsn = event->cumtsn;
	/* Association id (ignored for one-to-one style sockets) and the
	 * context configured via the SCTP_CONTEXT socket option.
	 */
	sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
	sinfo.sinfo_context = event->asoc->default_rcv_context;
	/* Not used on the receive path. */
	sinfo.sinfo_timetolive = 0;

	put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
		 sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
}
/* Do accounting for bytes received and hold a reference to the association
 * for each skb: decreases the association's rwnd by the linear length of
 * the skb and recurses into any skb fragments.
 */
static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
				       struct sctp_association *asoc)
{
	struct sk_buff *skb, *frag;

	skb = sctp_event2skb(event);
	/* Set the owner and charge rwnd for bytes received. */
	sctp_ulpevent_set_owner(event, asoc);
	sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb));

	/* Linear-only skb: nothing more to account for. */
	if (!skb->data_len)
		return;

	/* Note: Not clearing the entire event struct as this is just a
	 * fragment of the real event. However, we still need to do rwnd
	 * accounting.
	 * In general, the skb passed from IP can have only 1 level of
	 * fragments. But we allow multiple levels of fragments.
	 */
	skb_walk_frags(skb, frag)
		sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc);
}
/* Do accounting for bytes just read by user and release the references to
 * the association: gives rwnd credit back for the whole skb (including
 * fragments) and drops the references taken in sctp_ulpevent_receive_data().
 */
static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
{
	struct sk_buff *skb, *frag;
	unsigned int len;

	/* Current stack structures assume that the rcv buffer is
	 * per socket. For UDP style sockets this is not true as
	 * multiple associations may be on a single UDP-style socket.
	 * Use the local private area of the skb to track the owning
	 * association.
	 */
	skb = sctp_event2skb(event);
	/* skb->len already covers the fragments, so one rwnd increase
	 * below is enough.
	 */
	len = skb->len;

	if (!skb->data_len)
		goto done;

	/* Don't forget the fragments. */
	skb_walk_frags(skb, frag) {
		/* NOTE: skb_shinfos are recursive. Although IP returns
		 * skb's with only 1 level of fragments, SCTP reassembly can
		 * increase the levels.
		 */
		sctp_ulpevent_release_frag_data(sctp_skb2event(frag));
	}

done:
	sctp_assoc_rwnd_increase(event->asoc, len);
	sctp_ulpevent_release_owner(event);
}
/* Release the association references held by a fragment event and,
 * recursively, by its own fragments. Unlike sctp_ulpevent_release_data()
 * this does no rwnd accounting: the top-level skb's len already covered
 * the fragment bytes.
 */
static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
{
	struct sk_buff *skb, *frag;

	skb = sctp_event2skb(event);

	if (!skb->data_len)
		goto done;

	/* Don't forget the fragments. */
	skb_walk_frags(skb, frag) {
		/* NOTE: skb_shinfos are recursive. Although IP returns
		 * skb's with only 1 level of fragments, SCTP reassembly can
		 * increase the levels.
		 */
		sctp_ulpevent_release_frag_data(sctp_skb2event(frag));
	}

done:
	sctp_ulpevent_release_owner(event);
}
/* Free a ulpevent that has an owner: drop the association reference
 * (and, for DATA events, return rwnd credit) and free the backing skb.
 */
void sctp_ulpevent_free(struct sctp_ulpevent *event)
{
	if (!sctp_ulpevent_is_notification(event))
		sctp_ulpevent_release_data(event);
	else
		sctp_ulpevent_release_owner(event);

	kfree_skb(sctp_event2skb(event));
}
/* Drain a list of skb-backed ulpevents, freeing each one. */
void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	for (skb = skb_dequeue(list); skb != NULL; skb = skb_dequeue(list))
		sctp_ulpevent_free(sctp_skb2event(skb));
}
| gpl-2.0 |
simon-rock/linux-2.6.32-rc8 | arch/powerpc/platforms/85xx/sbc8560.c | 1601 | 7730 | /*
* Wind River SBC8560 setup and early boot code.
*
* Copyright 2007 Wind River Systems Inc.
*
* By Paul Gortmaker (see MAINTAINERS for contact information)
*
* Based largely on the MPC8560ADS support - Copyright 2005 Freescale Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#ifdef CONFIG_CPM2
#include <asm/cpm2.h>
#include <sysdev/cpm2_pic.h>
#endif
#ifdef CONFIG_CPM2
/* Chained interrupt handler: drain every pending CPM2 interrupt, then
 * issue an EOI for the cascade input on the parent controller.
 */
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
	int cascade_irq;

	/* cpm2_get_irq() returns a negative value when nothing is pending. */
	while ((cascade_irq = cpm2_get_irq()) >= 0)
		generic_handle_irq(cascade_irq);
	desc->chip->eoi(irq);
}
#endif /* CONFIG_CPM2 */
/* Locate the OpenPIC node in the device tree, map its registers and
 * initialize the MPIC as the primary interrupt controller; when CPM2
 * support is built in, also hook the CPM2 PIC behind it as a cascaded
 * controller.
 */
static void __init sbc8560_pic_init(void)
{
	struct mpic *mpic;
	struct resource r;
	struct device_node *np = NULL;
#ifdef CONFIG_CPM2
	int irq;
#endif

	np = of_find_node_by_type(np, "open-pic");
	if (!np) {
		printk(KERN_ERR "Could not find open-pic node\n");
		return;
	}
	if (of_address_to_resource(np, 0, &r)) {
		printk(KERN_ERR "Could not map mpic register space\n");
		of_node_put(np);
		return;
	}
	/* Primary controller, 256 sources, big-endian registers, reset
	 * as part of bring-up.
	 */
	mpic = mpic_alloc(np, r.start,
			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
			0, 256, " OpenPIC ");
	BUG_ON(mpic == NULL);
	of_node_put(np);
	mpic_init(mpic);

#ifdef CONFIG_CPM2
	/* Setup CPM2 PIC */
	np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic");
	if (np == NULL) {
		printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n");
		return;
	}
	irq = irq_of_parse_and_map(np, 0);

	cpm2_pic_init(np);
	of_node_put(np);
	/* All CPM2 interrupts arrive on a single MPIC input; demultiplex
	 * them in cpm2_cascade().
	 */
	set_irq_chained_handler(irq, cpm2_cascade);
#endif
}
/*
* Setup the architecture
*/
#ifdef CONFIG_CPM2
/* One CPM2 pin-mux entry: parallel I/O port number, pin within that
 * port, and direction/function flags for cpm2_set_pin().
 */
struct cpm_pin {
	int port, pin, flags;
};

/* Pin multiplexing for the on-board SCC1/SCC2 serial channels and the
 * FCC2/FCC3 controllers, including their external clock inputs.
 */
static const struct cpm_pin sbc8560_pins[] = {
	/* SCC1 */
	{3, 29, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	/* SCC2 */
	{3, 26, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{3, 27, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{3, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	/* FCC2 */
	{1, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 20, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 22, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 25, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 26, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{1, 30, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 31, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{2, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK14 */
	{2, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK13 */
	/* FCC3 */
	{1, 4, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 5, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 6, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 7, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 9, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 12, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 13, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 14, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 15, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
	{1, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{1, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
	{2, 16, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* CLK16 */
	{2, 17, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* CLK15 */
};
/* Program the CPM2 pin multiplexing from sbc8560_pins[] and route the
 * RX/TX clocks of the SCC and FCC devices.
 */
static void __init init_ioports(void)
{
	const struct cpm_pin *pin = sbc8560_pins;
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(sbc8560_pins); idx++, pin++)
		cpm2_set_pin(pin->port, pin->pin, pin->flags);

	/* SCC1/SCC2 are clocked from BRG1/BRG2; FCC2/FCC3 from CLK13-16. */
	cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_TX);
	cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_TX);
	cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX);
	cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK15, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK16, CPM_CLK_TX);
}
#endif
/* Board setup: reset and configure the CPM2 (including pin mux) and
 * register any MPC8540-compatible PCI host bridges from the device
 * tree.
 */
static void __init sbc8560_setup_arch(void)
{
#ifdef CONFIG_PCI
	struct device_node *np;
#endif

	if (ppc_md.progress)
		ppc_md.progress("sbc8560_setup_arch()", 0);

#ifdef CONFIG_CPM2
	cpm2_reset();
	init_ioports();
#endif

#ifdef CONFIG_PCI
	/* Register each PCI bridge as a primary bus. */
	for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
		fsl_add_bridge(np, 1);
#endif
}
/* /proc/cpuinfo hook: report the board vendor, the PVR/SVR ids, and
 * the CPU PLL configuration encoded in the upper bits of HID1.
 */
static void sbc8560_show_cpuinfo(struct seq_file *m)
{
	uint pvr = mfspr(SPRN_PVR);
	uint svr = mfspr(SPRN_SVR);
	uint hid1 = mfspr(SPRN_HID1);

	seq_printf(m, "Vendor\t\t: Wind River\n");
	seq_printf(m, "PVR\t\t: 0x%x\n", pvr);
	seq_printf(m, "SVR\t\t: 0x%x\n", svr);

	/* Display cpu Pll setting */
	seq_printf(m, "PLL setting\t: 0x%x\n", ((hid1 >> 24) & 0x3f));
}
/* Buses whose children should be registered as of_platform devices. */
static struct of_device_id __initdata of_bus_ids[] = {
	{ .name = "soc", },
	{ .type = "soc", },
	{ .name = "cpm", },
	{ .name = "localbus", },
	{ .compatible = "simple-bus", },
	{ .compatible = "gianfar", },
	{},	/* sentinel */
};
/* Walk the device tree and create platform devices for the children of
 * every bus matched by of_bus_ids[]. Always succeeds.
 */
static int __init declare_of_platform_devices(void)
{
	of_platform_bus_probe(NULL, of_bus_ids, NULL);

	return 0;
}
machine_device_initcall(sbc8560, declare_of_platform_devices);
/*
 * Called very early, before the device tree is unflattened, to decide
 * whether this machine description matches the running board.
 */
static int __init sbc8560_probe(void)
{
	return of_flat_dt_is_compatible(of_get_flat_dt_root(), "SBC8560");
}
#ifdef CONFIG_RTC_DRV_M48T59
/* Register the on-board M48T59 RTC described in the device tree as an
 * "rtc-m48t59" platform device.
 *
 * Returns 0 on success, -ENODEV if the DT node or its "reg" resource
 * is missing, or the PTR_ERR of a failed platform device registration.
 */
static int __init sbc8560_rtc_init(void)
{
	struct device_node *np;
	struct resource res;
	struct platform_device *rtc_dev;

	np = of_find_compatible_node(NULL, NULL, "m48t59");
	if (np == NULL) {
		printk("No RTC in DTB. Has it been eaten by wild dogs?\n");
		return -ENODEV;
	}

	/* Bug fix: the return value of of_address_to_resource() was
	 * ignored, so a node without a usable "reg" property would
	 * register the RTC with an uninitialized resource.
	 */
	if (of_address_to_resource(np, 0, &res)) {
		of_node_put(np);
		return -ENODEV;
	}
	of_node_put(np);

	printk("Found RTC (m48t59) at i/o 0x%x\n", res.start);

	rtc_dev = platform_device_register_simple("rtc-m48t59", 0, &res, 1);
	if (IS_ERR(rtc_dev)) {
		printk("Registering sbc8560 RTC device failed\n");
		return PTR_ERR(rtc_dev);
	}

	return 0;
}
arch_initcall(sbc8560_rtc_init);
#endif /* M48T59 */
static __u8 __iomem *brstcr;
/* Map the board-specific reset control register (BRSTCR) described in
 * the device tree so sbc8560_rstcr_restart() can use it.
 *
 * Returns 0 (a failed ioremap only degrades restart), or -ENODEV when
 * the DT node or its "reg" resource is missing.
 */
static int __init sbc8560_bdrstcr_init(void)
{
	struct device_node *np;
	struct resource res;

	np = of_find_compatible_node(NULL, NULL, "wrs,sbc8560-brstcr");
	if (np == NULL) {
		printk(KERN_WARNING "sbc8560: No board specific RSTCR in DTB.\n");
		return -ENODEV;
	}

	/* Bug fix: the return value of of_address_to_resource() was
	 * ignored, which could hand an uninitialized resource to
	 * ioremap().
	 */
	if (of_address_to_resource(np, 0, &res)) {
		of_node_put(np);
		return -ENODEV;
	}

	printk(KERN_INFO "sbc8560: Found BRSTCR at i/o 0x%x\n", res.start);

	/* Bug fix: the mapped length was "res.end - res.start", one byte
	 * short of the region; a resource's size is end - start + 1.
	 */
	brstcr = ioremap(res.start, res.end - res.start + 1);
	if (!brstcr)
		printk(KERN_WARNING "sbc8560: ioremap of brstcr failed.\n");

	of_node_put(np);

	return 0;
}
arch_initcall(sbc8560_bdrstcr_init);
/* Machine restart hook: clearing bit 7 of the mapped BRSTCR asserts
 * board reset. If the register was never mapped, just spin with
 * interrupts off until an external reset occurs.
 */
void sbc8560_rstcr_restart(char * cmd)
{
	local_irq_disable();
	if(brstcr)
		clrbits8(brstcr, 0x80);

	while(1);
}
/* Machine description wiring the SBC8560 callbacks into ppc_md. */
define_machine(sbc8560) {
	.name = "SBC8560",
	.probe = sbc8560_probe,
	.setup_arch = sbc8560_setup_arch,
	.init_IRQ = sbc8560_pic_init,
	.show_cpuinfo = sbc8560_show_cpuinfo,
	.get_irq = mpic_get_irq,
	.restart = sbc8560_rstcr_restart,
	.calibrate_decr = generic_calibrate_decr,
	.progress = udbg_progress,
};
| gpl-2.0 |
osmc/vero2-linux | drivers/tty/serial/bfin_uart.c | 2113 | 40286 | /*
* Blackfin On-Chip Serial Driver
*
* Copyright 2006-2011 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
* Licensed under the GPL-2 or later.
*/
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#define DRIVER_NAME "bfin-uart"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/kgdb.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <asm/portmux.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/bfin_serial.h>
/* Early printk cannot be used when this driver is built as a module.
 * (This block was accidentally duplicated; collapsed to one copy.)
 */
#ifdef CONFIG_SERIAL_BFIN_MODULE
# undef CONFIG_EARLY_PRINTK
#endif
/* UART name and device definitions */
#define BFIN_SERIAL_DEV_NAME "ttyBF"
#define BFIN_SERIAL_MAJOR 204
#define BFIN_SERIAL_MINOR 64
static struct bfin_serial_port *bfin_serial_ports[BFIN_UART_NR_PORTS];
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
# ifndef CONFIG_SERIAL_BFIN_PIO
# error KGDB only support UART in PIO mode.
# endif
static int kgdboc_port_line;
static int kgdboc_break_enabled;
#endif
/*
* Setup for console. Argument comes from the menuconfig
*/
#define DMA_RX_XCOUNT 512
#define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT)
#define DMA_RX_FLUSH_JIFFIES (HZ / 50)
#ifdef CONFIG_SERIAL_BFIN_DMA
static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart);
#else
static void bfin_serial_tx_chars(struct bfin_serial_port *uart);
#endif
static void bfin_serial_reset_irda(struct uart_port *port);
#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
/* Report modem control lines. DSR and carrier are always reported as
 * asserted; CTS follows the (active-low) CTS pin, or is treated as
 * permanently asserted when no CTS GPIO is wired up.
 */
static unsigned int bfin_serial_get_mctrl(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
	unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;

	/* CTS PIN is negative assertive; no pin means always clear-to-send. */
	if (uart->cts_pin < 0 || UART_GET_CTS(uart))
		mctrl |= TIOCM_CTS;

	return mctrl;
}
/* Drive the (active-low) RTS line according to TIOCM_RTS; a no-op when
 * no RTS pin is wired up.
 */
static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

	if (uart->rts_pin >= 0) {
		/* RTS PIN is negative assertive; the macros hide polarity. */
		if (mctrl & TIOCM_RTS)
			UART_ENABLE_RTS(uart);
		else
			UART_DISABLE_RTS(uart);
	}
}
/*
* Handle any change of modem status signal.
*/
/* Handle any change of modem status signal. With hardware CTS/RTS the
 * status-change event is acknowledged and the tty's hw_stopped flow
 * control state is updated before the serial core is notified.
 */
static irqreturn_t bfin_serial_mctrl_cts_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;
	unsigned int status = bfin_serial_get_mctrl(&uart->port);
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
	struct tty_struct *tty = uart->port.state->port.tty;

	/* Acknowledge the CTS status-change event in the hardware. */
	UART_CLEAR_SCTS(uart);
	if (tty->hw_stopped) {
		if (status) {
			/* CTS reasserted: resume transmission. */
			tty->hw_stopped = 0;
			uart_write_wakeup(&uart->port);
		}
	} else {
		if (!status)
			/* CTS dropped: throttle transmission. */
			tty->hw_stopped = 1;
	}
#endif
	uart_handle_cts_change(&uart->port, status & TIOCM_CTS);

	return IRQ_HANDLED;
}
#else
/* No CTS/RTS support configured: report CTS/DSR/carrier as always set. */
static unsigned int bfin_serial_get_mctrl(struct uart_port *port)
{
	return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}

/* No CTS/RTS support configured: modem control writes are a no-op. */
static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
#endif
/*
 * Stop transmitting; interrupts are disabled on entry. Waits for the
 * transmitter to fully drain (TEMT) so no queued byte is lost, then
 * halts the TX DMA and accounts for bytes already sent (PIO build:
 * masks the TX interrupt instead).
 */
static void bfin_serial_stop_tx(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
#ifdef CONFIG_SERIAL_BFIN_DMA
	struct circ_buf *xmit = &uart->port.state->xmit;
#endif

	/* Busy-wait until the transmit shift register is empty. */
	while (!(UART_GET_LSR(uart) & TEMT))
		cpu_relax();

#ifdef CONFIG_SERIAL_BFIN_DMA
	disable_dma(uart->tx_dma_channel);
	/* Consume the bytes the DMA engine already pushed out. */
	xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
	uart->port.icount.tx += uart->tx_count;
	uart->tx_count = 0;
	uart->tx_done = 1;
#else
#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
	/* Clear TFI bit */
	UART_PUT_LSR(uart, TFI);
#endif
	UART_CLEAR_IER(uart, ETBEI);
#endif
}
/*
 * Start (or restart) transmission; port is locked and interrupts are
 * disabled.
 */
static void bfin_serial_start_tx(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
	struct tty_struct *tty = uart->port.state->port.tty;

	/*
	 * To avoid losing the RX interrupt, we reset the IR function
	 * before sending data.
	 */
	if (tty->termios.c_line == N_IRDA)
		bfin_serial_reset_irda(port);

#ifdef CONFIG_SERIAL_BFIN_DMA
	/* Only kick a new DMA transfer once the previous one finished. */
	if (uart->tx_done)
		bfin_serial_dma_tx_chars(uart);
#else
	UART_SET_IER(uart, ETBEI);
	bfin_serial_tx_chars(uart);
#endif
}
/*
 * Stop receiving: mask the RX-buffer-full interrupt. Interrupts are
 * enabled when this is called.
 */
static void bfin_serial_stop_rx(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

	UART_CLEAR_IER(uart, ERBFI);
}
/*
 * Set the modem control timer to fire immediately.
 * This hardware reports modem status changes via the CTS interrupt, so
 * there is nothing to do here.
 */
static void bfin_serial_enable_ms(struct uart_port *port)
{
}
#if ANOMALY_05000363 && defined(CONFIG_SERIAL_BFIN_PIO)
# define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold)
# define UART_SET_ANOMALY_THRESHOLD(uart, v) ((uart)->anomaly_threshold = (v))
#else
# define UART_GET_ANOMALY_THRESHOLD(uart) 0
# define UART_SET_ANOMALY_THRESHOLD(uart, v)
#endif
#ifdef CONFIG_SERIAL_BFIN_PIO
/* PIO receive path: read one character together with its line status,
 * apply the anomaly-05000363 break-flood workaround, update the error
 * counters, and hand the character to the tty layer with the right
 * error flag.
 */
static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
{
	unsigned int status, ch, flg;
	static struct timeval anomaly_start = { .tv_sec = 0 };

	/* Read and clear the line status BEFORE fetching the character. */
	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

	ch = UART_GET_CHAR(uart);
	uart->port.icount.rx++;

#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	/* Ctrl-C on the KGDB console line drops into the debugger. */
	if (kgdb_connected && kgdboc_port_line == uart->port.line
		&& kgdboc_break_enabled)
		if (ch == 0x3) {/* Ctrl + C */
			kgdb_breakpoint();
			return;
		}

	if (!uart->port.state)
		return;
#endif

	if (ANOMALY_05000363) {
		/* The BF533 (and BF561) family of processors have a nice anomaly
		 * where they continuously generate characters for a "single" break.
		 * We have to basically ignore this flood until the "next" valid
		 * character comes across. Due to the nature of the flood, it is
		 * not possible to reliably catch bytes that are sent too quickly
		 * after this break. So application code talking to the Blackfin
		 * which sends a break signal must allow at least 1.5 character
		 * times after the end of the break for things to stabilize. This
		 * timeout was picked as it must absolutely be larger than 1
		 * character time +/- some percent. So 1.5 sounds good. All other
		 * Blackfin families operate properly. Woo.
		 */
		if (anomaly_start.tv_sec) {
			struct timeval curr;
			suseconds_t usecs;

			/* A set bit anywhere in the byte means a real character. */
			if ((~ch & (~ch + 1)) & 0xff)
				goto known_good_char;

			do_gettimeofday(&curr);
			if (curr.tv_sec - anomaly_start.tv_sec > 1)
				goto known_good_char;

			usecs = 0;
			if (curr.tv_sec != anomaly_start.tv_sec)
				usecs += USEC_PER_SEC;
			usecs += curr.tv_usec - anomaly_start.tv_usec;

			if (usecs > UART_GET_ANOMALY_THRESHOLD(uart))
				goto known_good_char;

			/* Still inside the flood window: drop the character. */
			if (ch)
				anomaly_start.tv_sec = 0;
			else
				anomaly_start = curr;

			return;

known_good_char:
			status &= ~BI;
			anomaly_start.tv_sec = 0;
		}
	}

	if (status & BI) {
		if (ANOMALY_05000363)
			if (bfin_revid() < 5)
				do_gettimeofday(&anomaly_start);
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	/* Mask to the conditions the user asked to see, pick a tty flag. */
	status &= uart->port.read_status_mask;

	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	if (uart_handle_sysrq_char(&uart->port, ch))
		goto ignore_char;

	uart_insert_char(&uart->port, status, OE, ch, flg);

ignore_char:
	tty_flip_buffer_push(&uart->port.state->port);
}
/* PIO transmit: push characters from the circular xmit buffer into the
 * TX FIFO while the holding register is empty (THRE); masks the TX
 * interrupt once the buffer drains or transmission is stopped.
 */
static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.state->xmit;

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
		/* Clear TFI bit */
		UART_PUT_LSR(uart, TFI);
#endif
		/* Anomaly notes:
		 * 05000215 - we always clear ETBEI within last UART TX
		 * interrupt to end a string. It is always set
		 * when start a new tx.
		 */
		UART_CLEAR_IER(uart, ETBEI);
		return;
	}

	/* A pending x_char (e.g. flow control) goes out first. */
	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
		UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		uart->port.icount.tx++;
	}

	/* Wake up writers once enough room has opened in the buffer. */
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uart->port);
}
/* RX interrupt handler: drain characters while the LSR reports data
 * ready (DR).
 */
static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

	while (UART_GET_LSR(uart) & DR)
		bfin_serial_rx_chars(uart);

	return IRQ_HANDLED;
}
/* TX interrupt handler: refill the TX FIFO under the port lock when
 * the holding register is empty (THRE).
 */
static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

	spin_lock(&uart->port.lock);
	if (UART_GET_LSR(uart) & THRE)
		bfin_serial_tx_chars(uart);
	spin_unlock(&uart->port.lock);

	return IRQ_HANDLED;
}
#endif
#ifdef CONFIG_SERIAL_BFIN_DMA
/* Set up and start one TX DMA transfer covering the contiguous run of
 * the circular xmit buffer (up to the wrap point). tx_done stays 0
 * until the completion interrupt accounts for the transfer.
 */
static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.state->xmit;

	uart->tx_done = 0;

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
		uart->tx_count = 0;
		uart->tx_done = 1;
		return;
	}

	/* A pending x_char is sent by PIO ahead of the DMA data. */
	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	/* Limit the transfer to the contiguous part before the wrap. */
	uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
		uart->tx_count = UART_XMIT_SIZE - xmit->tail;

	/* The DMA engine reads memory directly: flush the dcache range
	 * before handing it over.
	 */
	blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail),
		(unsigned long)(xmit->buf+xmit->tail+uart->tx_count));
	set_dma_config(uart->tx_dma_channel,
		set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
			INTR_ON_BUF,
			DIMENSION_LINEAR,
			DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail));
	set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
	set_dma_x_modify(uart->tx_dma_channel, 1);
	SSYNC();
	enable_dma(uart->tx_dma_channel);

	UART_SET_IER(uart, ETBEI);
}
/* Push the characters accumulated in the DMA RX ring (between tail and
 * head) to the tty layer, applying one line-status-derived tty flag to
 * the whole batch.
 */
static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
{
	int i, flg, status;

	/* Read and clear the line status before consuming the data. */
	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

	uart->port.icount.rx +=
		CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail,
		UART_XMIT_SIZE);

	if (status & BI) {
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto dma_ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	/* Mask to the conditions the user asked to see, pick a tty flag. */
	status &= uart->port.read_status_mask;

	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	/* Walk the ring from tail to head, wrapping at UART_XMIT_SIZE. */
	for (i = uart->rx_dma_buf.tail; ; i++) {
		if (i >= UART_XMIT_SIZE)
			i = 0;
		if (i == uart->rx_dma_buf.head)
			break;
		if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i]))
			uart_insert_char(&uart->port, status, OE,
				uart->rx_dma_buf.buf[i], flg);
	}

dma_ignore_char:
	tty_flip_buffer_push(&uart->port.state->port);
}
/* Periodic flush: compute how far the 2D RX DMA has advanced in the
 * ring buffer, publish any newly received data to the tty layer, and
 * re-arm the flush timer.
 */
void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
{
	int x_pos, pos;
	unsigned long flags;

	spin_lock_irqsave(&uart->rx_lock, flags);

	/* 2D DMA RX buffer ring is used. Because curr_y_count and
	 * curr_x_count can't be read as an atomic operation,
	 * curr_y_count should be read before curr_x_count. When
	 * curr_x_count is read, curr_y_count may already indicate
	 * next buffer line. But, the position calculated here is
	 * still indicate the old line. The wrong position data may
	 * be smaller than current buffer tail, which cause garbages
	 * are received if it is not prohibit.
	 */
	uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
	x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
	uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
	if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
		uart->rx_dma_nrows = 0;
	x_pos = DMA_RX_XCOUNT - x_pos;
	if (x_pos == DMA_RX_XCOUNT)
		x_pos = 0;

	/* Linear offset of the DMA write pointer within the ring. */
	pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;

	/* Ignore receiving data if new position is in the same line of
	 * current buffer tail and small.
	 */
	if (pos > uart->rx_dma_buf.tail ||
		uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
		uart->rx_dma_buf.head = pos;
		bfin_serial_dma_rx_chars(uart);
		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
	}

	spin_unlock_irqrestore(&uart->rx_lock, flags);

	mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
}
/*
 * TX DMA completion interrupt: retire the finished transfer, account
 * the transmitted bytes, wake writers if the circ buffer has drained
 * below WAKEUP_CHARS, and kick off the next DMA chunk.
 */
static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;
	struct circ_buf *xmit = &uart->port.state->xmit;

	spin_lock(&uart->port.lock);
	/* Only act once the DMA channel has actually stopped running. */
	if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
		disable_dma(uart->tx_dma_channel);
		clear_dma_irqstat(uart->tx_dma_channel);
		/* Anomaly notes:
		 *  05000215 -	we always clear ETBEI within last UART TX
		 *		interrupt to end a string. It is always set
		 *		when start a new tx.
		 */
		UART_CLEAR_IER(uart, ETBEI);
		uart->port.icount.tx += uart->tx_count;
		if (!(xmit->tail == 0 && xmit->head == 0)) {
			/* Advance tail past the bytes DMA just sent. */
			xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);

			if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
				uart_write_wakeup(&uart->port);
		}

		/* Queue the next chunk (no-op if the buffer is empty). */
		bfin_serial_dma_tx_chars(uart);
	}

	spin_unlock(&uart->port.lock);

	return IRQ_HANDLED;
}
/*
 * RX DMA row interrupt: recompute the DMA engine's position in the 2D
 * ring (rows only — unlike the timeout path, the in-row column offset
 * is not folded in here) and flush any newly received data to the tty.
 */
static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;
	unsigned int irqstat;
	int x_pos, pos;

	spin_lock(&uart->rx_lock);
	irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
	clear_dma_irqstat(uart->rx_dma_channel);

	/* Counters count down; convert to completed rows (see the
	 * ordering note in bfin_serial_rx_dma_timeout()). */
	uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
	x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
	uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
	if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
		uart->rx_dma_nrows = 0;

	pos = uart->rx_dma_nrows * DMA_RX_XCOUNT;
	/* Skip stale positions that would rewind behind the tail. */
	if (pos > uart->rx_dma_buf.tail ||
		uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
		uart->rx_dma_buf.head = pos;
		bfin_serial_dma_rx_chars(uart);
		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
	}

	spin_unlock(&uart->rx_lock);

	return IRQ_HANDLED;
}
#endif
/*
* Return TIOCSER_TEMT when transmitter is not busy.
*/
/*
 * Report TIOCSER_TEMT once both the TX FIFO and the shift register
 * have fully drained (LSR TEMT bit), 0 while transmission is ongoing.
 */
static unsigned int bfin_serial_tx_empty(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

	return (UART_GET_LSR(uart) & TEMT) ? TIOCSER_TEMT : 0;
}
/*
 * Assert or release a break condition on the line by toggling the SB
 * bit in the line control register.
 */
static void bfin_serial_break_ctl(struct uart_port *port, int break_state)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
	u32 lcr;

	lcr = UART_GET_LCR(uart) & ~SB;
	if (break_state)
		lcr |= SB;
	UART_PUT_LCR(uart, lcr);
	SSYNC();
}
/*
 * Bring the port up.  In DMA mode this claims the RX/TX DMA channels,
 * allocates and programs the 2D RX DMA ring and starts the flush
 * timer; in PIO mode it requests the RX/TX IRQs (plus the shared DMA
 * channels on BF54x).  CTS/RTS resources are then hooked up and the
 * receive interrupt enabled.
 *
 * Returns 0 on success, -EBUSY if an IRQ/DMA channel cannot be
 * claimed, or -ENOMEM if the RX DMA buffer cannot be allocated.
 *
 * Fix over the previous version: the dma_alloc_coherent() result was
 * never checked, so an allocation failure would have pointed the DMA
 * engine at a NULL buffer.
 */
static int bfin_serial_startup(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

#ifdef CONFIG_SERIAL_BFIN_DMA
	dma_addr_t dma_handle;

	if (request_dma(uart->rx_dma_channel, "BFIN_UART_RX") < 0) {
		printk(KERN_NOTICE "Unable to attach Blackfin UART RX DMA channel\n");
		return -EBUSY;
	}

	if (request_dma(uart->tx_dma_channel, "BFIN_UART_TX") < 0) {
		printk(KERN_NOTICE "Unable to attach Blackfin UART TX DMA channel\n");
		free_dma(uart->rx_dma_channel);
		return -EBUSY;
	}

	set_dma_callback(uart->rx_dma_channel, bfin_serial_dma_rx_int, uart);
	set_dma_callback(uart->tx_dma_channel, bfin_serial_dma_tx_int, uart);

	uart->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA);
	if (!uart->rx_dma_buf.buf) {
		printk(KERN_NOTICE "Unable to allocate Blackfin UART RX DMA buffer\n");
		free_dma(uart->tx_dma_channel);
		free_dma(uart->rx_dma_channel);
		return -ENOMEM;
	}
	uart->rx_dma_buf.head = 0;
	uart->rx_dma_buf.tail = 0;
	uart->rx_dma_nrows = 0;

	/* Autobuffered 2D transfer: one interrupt per completed row. */
	set_dma_config(uart->rx_dma_channel,
		set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
			INTR_ON_ROW, DIMENSION_2D,
			DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT);
	set_dma_x_modify(uart->rx_dma_channel, 1);
	set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT);
	set_dma_y_modify(uart->rx_dma_channel, 1);
	set_dma_start_addr(uart->rx_dma_channel, (unsigned long)uart->rx_dma_buf.buf);
	enable_dma(uart->rx_dma_channel);

	/* Periodic flush so data between row interrupts is not delayed. */
	uart->rx_dma_timer.data = (unsigned long)(uart);
	uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout;
	uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
	add_timer(&(uart->rx_dma_timer));
#else
# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	/* kgdboc owns this port's break handling; skip claiming IRQs. */
	if (kgdboc_port_line == uart->port.line && kgdboc_break_enabled)
		kgdboc_break_enabled = 0;
	else {
# endif
	if (request_irq(uart->rx_irq, bfin_serial_rx_int, 0,
	     "BFIN_UART_RX", uart)) {
		printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n");
		return -EBUSY;
	}

	if (request_irq
	    (uart->tx_irq, bfin_serial_tx_int, 0,
	     "BFIN_UART_TX", uart)) {
		printk(KERN_NOTICE "Unable to attach BlackFin UART TX interrupt\n");
		free_irq(uart->rx_irq, uart);
		return -EBUSY;
	}

# ifdef CONFIG_BF54x
	{
		/*
		 * UART2 and UART3 on BF548 share interrupt PINs and DMA
		 * controllers with SPORT2 and SPORT3. UART rx and tx
		 * interrupts are generated in PIO mode only when configure
		 * their peripheral mapping registers properly, which means
		 * request corresponding DMA channels in PIO mode as well.
		 */
		unsigned uart_dma_ch_rx, uart_dma_ch_tx;

		switch (uart->rx_irq) {
		case IRQ_UART3_RX:
			uart_dma_ch_rx = CH_UART3_RX;
			uart_dma_ch_tx = CH_UART3_TX;
			break;
		case IRQ_UART2_RX:
			uart_dma_ch_rx = CH_UART2_RX;
			uart_dma_ch_tx = CH_UART2_TX;
			break;
		default:
			uart_dma_ch_rx = uart_dma_ch_tx = 0;
			break;
		};

		if (uart_dma_ch_rx &&
			request_dma(uart_dma_ch_rx, "BFIN_UART_RX") < 0) {
			printk(KERN_NOTICE"Fail to attach UART interrupt\n");
			free_irq(uart->rx_irq, uart);
			free_irq(uart->tx_irq, uart);
			return -EBUSY;
		}
		if (uart_dma_ch_tx &&
			request_dma(uart_dma_ch_tx, "BFIN_UART_TX") < 0) {
			printk(KERN_NOTICE "Fail to attach UART interrupt\n");
			free_dma(uart_dma_ch_rx);
			free_irq(uart->rx_irq, uart);
			free_irq(uart->tx_irq, uart);
			return -EBUSY;
		}
	}
# endif
# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	}
# endif
#endif

#ifdef CONFIG_SERIAL_BFIN_CTSRTS
	/* CTS/RTS via GPIOs; failure degrades to no flow control. */
	if (uart->cts_pin >= 0) {
		if (request_irq(gpio_to_irq(uart->cts_pin),
			bfin_serial_mctrl_cts_int,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
			0, "BFIN_UART_CTS", uart)) {
			uart->cts_pin = -1;
			pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
		}
	}
	if (uart->rts_pin >= 0) {
		if (gpio_request(uart->rts_pin, DRIVER_NAME)) {
			pr_info("fail to request RTS PIN at GPIO_%d\n", uart->rts_pin);
			uart->rts_pin = -1;
		} else
			gpio_direction_output(uart->rts_pin, 0);
	}
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
	/* Hardware flow control handled by the UART block itself. */
	if (uart->cts_pin >= 0) {
		if (request_irq(uart->status_irq, bfin_serial_mctrl_cts_int,
			IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
			uart->cts_pin = -1;
			dev_info(port->dev, "Unable to attach BlackFin UART Modem Status interrupt.\n");
		}

		/* CTS RTS PINs are negative assertive. */
		UART_PUT_MCR(uart, UART_GET_MCR(uart) | ACTS);
		UART_SET_IER(uart, EDSSI);
	}
#endif

	UART_SET_IER(uart, ERBFI);
	return 0;
}
/*
 * Tear down everything bfin_serial_startup() set up: DMA channels,
 * the RX flush timer and coherent buffer (DMA mode) or the RX/TX IRQs
 * (PIO mode), plus any CTS/RTS resources.
 */
static void bfin_serial_shutdown(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

#ifdef CONFIG_SERIAL_BFIN_DMA
	disable_dma(uart->tx_dma_channel);
	free_dma(uart->tx_dma_channel);
	disable_dma(uart->rx_dma_channel);
	free_dma(uart->rx_dma_channel);
	del_timer(&(uart->rx_dma_timer));
	/* NOTE(review): the dma handle passed here is 0, not the handle
	 * returned by dma_alloc_coherent() in startup — looks suspect;
	 * confirm against the DMA API requirements. */
	dma_free_coherent(NULL, PAGE_SIZE, uart->rx_dma_buf.buf, 0);
#else
#ifdef CONFIG_BF54x
	/* Release the DMA channels claimed for PIO-mode IRQ routing. */
	switch (uart->port.irq) {
	case IRQ_UART3_RX:
		free_dma(CH_UART3_RX);
		free_dma(CH_UART3_TX);
		break;
	case IRQ_UART2_RX:
		free_dma(CH_UART2_RX);
		free_dma(CH_UART2_TX);
		break;
	default:
		break;
	};
#endif
	free_irq(uart->rx_irq, uart);
	free_irq(uart->tx_irq, uart);
#endif

#ifdef CONFIG_SERIAL_BFIN_CTSRTS
	if (uart->cts_pin >= 0)
		free_irq(gpio_to_irq(uart->cts_pin), uart);
	if (uart->rts_pin >= 0)
		gpio_free(uart->rts_pin);
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
	if (uart->cts_pin >= 0)
		free_irq(uart->status_irq, uart);
#endif
}
/*
 * Apply new termios settings: word length, stop bits, parity, status
 * masks and baud rate.  The UART is briefly disabled while the divisor
 * and LCR are reprogrammed (DLAB access), then re-enabled with the
 * previous interrupt mask.
 */
static void
bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
		struct ktermios *old)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
	unsigned long flags;
	unsigned int baud, quot;
	unsigned int ier, lcr = 0;
	unsigned long timeout;

	switch (termios->c_cflag & CSIZE) {
	case CS8:
		lcr = WLS(8);
		break;
	case CS7:
		lcr = WLS(7);
		break;
	case CS6:
		lcr = WLS(6);
		break;
	case CS5:
		lcr = WLS(5);
		break;
	default:
		printk(KERN_ERR "%s: word length not supported\n",
			__func__);
	}

	/* Anomaly notes:
	 *  05000231 -  STOP bit is always set to 1 whatever the user is set.
	 */
	if (termios->c_cflag & CSTOPB) {
		if (ANOMALY_05000231)
			printk(KERN_WARNING "STOP bits other than 1 is not "
				"supported in case of anomaly 05000231.\n");
		else
			lcr |= STB;
	}
	if (termios->c_cflag & PARENB)
		lcr |= PEN;
	if (!(termios->c_cflag & PARODD))
		lcr |= EPS;
	if (termios->c_cflag & CMSPAR)
		lcr |= STP;

	spin_lock_irqsave(&uart->port.lock, flags);

	/* Overruns are always reported; FE/PE/BI only if requested. */
	port->read_status_mask = OE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= (FE | PE);
	if (termios->c_iflag & (BRKINT | PARMRK))
		port->read_status_mask |= BI;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= FE | PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= BI;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= OE;
	}

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	quot = uart_get_divisor(port, baud);

	/* If discipline is not IRDA, apply ANOMALY_05000230 */
	if (termios->c_line != N_IRDA)
		quot -= ANOMALY_05000230;

	UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);

	/* Wait till the transfer buffer is empty */
	timeout = jiffies + msecs_to_jiffies(10);
	while (UART_GET_GCTL(uart) & UCEN && !(UART_GET_LSR(uart) & TEMT))
		if (time_after(jiffies, timeout)) {
			dev_warn(port->dev, "timeout waiting for TX buffer empty\n");
			break;
		}

	/* Disable UART */
	ier = UART_GET_IER(uart);
	UART_PUT_GCTL(uart, UART_GET_GCTL(uart) & ~UCEN);
	UART_DISABLE_INTS(uart);

	/* Set DLAB in LCR to Access CLK */
	UART_SET_DLAB(uart);

	UART_PUT_CLK(uart, quot);
	SSYNC();

	/* Clear DLAB in LCR to Access THR RBR IER */
	UART_CLEAR_DLAB(uart);

	UART_PUT_LCR(uart, (UART_GET_LCR(uart) & ~LCR_MASK) | lcr);

	/* Enable UART */
	UART_ENABLE_INTS(uart, ier);
	UART_PUT_GCTL(uart, UART_GET_GCTL(uart) | UCEN);

	/* Port speed changed, update the per-port timeout. */
	uart_update_timeout(port, termios->c_cflag, baud);

	spin_unlock_irqrestore(&uart->port.lock, flags);
}
/*
 * Name of this port type, reported through the serial core (NULL when
 * the port has not been identified as a Blackfin UART).
 */
static const char *bfin_serial_type(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

	if (uart->port.type != PORT_BFIN)
		return NULL;
	return "BFIN-UART";
}
/*
* Release the memory region(s) being used by 'port'.
*/
static void bfin_serial_release_port(struct uart_port *port)
{
	/* Nothing to do: MMIO and peripheral pins are managed in
	 * bfin_serial_probe()/bfin_serial_remove(). */
}
/*
* Request the memory region(s) being used by 'port'.
*/
static int bfin_serial_request_port(struct uart_port *port)
{
	/* Resources are claimed at probe time, so always succeed. */
	return 0;
}
/*
* Configure/autoconfigure the port.
*/
/*
 * Autoconfiguration hook: when asked to determine the port type,
 * claim the port's resources and mark it as a Blackfin UART.
 */
static void bfin_serial_config_port(struct uart_port *port, int flags)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

	if (!(flags & UART_CONFIG_TYPE))
		return;
	if (bfin_serial_request_port(&uart->port) == 0)
		uart->port.type = PORT_BFIN;
}
/*
* Verify the new serial_struct (for TIOCSSERIAL).
* The only change we allow are to the flags and type, and
* even then only between PORT_BFIN and PORT_UNKNOWN
*/
static int
bfin_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	/* Accept any user-supplied settings unconditionally. */
	return 0;
}
/*
* Enable the IrDA function if tty->ldisc.num is N_IRDA.
* In other cases, disable IrDA function.
*/
/*
 * Line-discipline hook: switch the UART into IrDA mode (with RX
 * polarity inversion) for N_IRDA, back to normal UART mode otherwise.
 */
static void bfin_serial_set_ldisc(struct uart_port *port, int ld)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
	unsigned int gctl = UART_GET_GCTL(uart);

	if (ld == N_IRDA)
		gctl |= (UMOD_IRDA | RPOLC);
	else
		gctl &= ~(UMOD_MASK | RPOLC);
	UART_PUT_GCTL(uart, gctl);
}
/*
 * Reset the IrDA block by pulsing IrDA mode off and then back on,
 * with a synchronizing SSYNC after each register write.
 */
static void bfin_serial_reset_irda(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
	unsigned int gctl;

	gctl = UART_GET_GCTL(uart) & ~(UMOD_MASK | RPOLC);
	UART_PUT_GCTL(uart, gctl);
	SSYNC();
	UART_PUT_GCTL(uart, gctl | (UMOD_IRDA | RPOLC));
	SSYNC();
}
#ifdef CONFIG_CONSOLE_POLL
/* Anomaly notes:
* 05000099 - Because we only use THRE in poll_put and DR in poll_get,
* losing other bits of UART_LSR is not a problem here.
*/
/*
 * kgdb/console-poll output: spin until the TX holding register is
 * empty, make sure DLAB is clear so THR is addressable, then write.
 */
static void bfin_serial_poll_put_char(struct uart_port *port, unsigned char chr)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

	for (;;) {
		if (UART_GET_LSR(uart) & THRE)
			break;
		cpu_relax();
	}

	UART_CLEAR_DLAB(uart);
	UART_PUT_CHAR(uart, chr);
}
/*
 * kgdb/console-poll input: spin until a byte is available (LSR DR),
 * clear DLAB so RBR is addressable, and return the received byte.
 */
static int bfin_serial_poll_get_char(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
	unsigned char received;

	for (;;) {
		if (UART_GET_LSR(uart) & DR)
			break;
		cpu_relax();
	}

	UART_CLEAR_DLAB(uart);
	received = UART_GET_CHAR(uart);
	return received;
}
#endif
/* Serial-core operations table for the Blackfin UART. */
static struct uart_ops bfin_serial_pops = {
	.tx_empty	= bfin_serial_tx_empty,
	.set_mctrl	= bfin_serial_set_mctrl,
	.get_mctrl	= bfin_serial_get_mctrl,
	.stop_tx	= bfin_serial_stop_tx,
	.start_tx	= bfin_serial_start_tx,
	.stop_rx	= bfin_serial_stop_rx,
	.enable_ms	= bfin_serial_enable_ms,
	.break_ctl	= bfin_serial_break_ctl,
	.startup	= bfin_serial_startup,
	.shutdown	= bfin_serial_shutdown,
	.set_termios	= bfin_serial_set_termios,
	.set_ldisc	= bfin_serial_set_ldisc,
	.type		= bfin_serial_type,
	.release_port	= bfin_serial_release_port,
	.request_port	= bfin_serial_request_port,
	.config_port	= bfin_serial_config_port,
	.verify_port	= bfin_serial_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_put_char	= bfin_serial_poll_put_char,
	.poll_get_char	= bfin_serial_poll_get_char,
#endif
};
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
/*
* If the port was already initialised (eg, by a boot loader),
* try to determine the current setup.
*/
/*
 * Recover the console parameters from hardware when the boot loader
 * already enabled the port (both ERBFI and ETBEI set in IER).  The
 * output parameters are only written when the port was found enabled;
 * otherwise the caller's defaults are left untouched.
 */
static void __init
bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
			   int *parity, int *bits)
{
	unsigned int status;

	status = UART_GET_IER(uart) & (ERBFI | ETBEI);
	if (status == (ERBFI | ETBEI)) {
		/* ok, the port was enabled */
		u32 lcr, clk;

		lcr = UART_GET_LCR(uart);

		*parity = 'n';
		if (lcr & PEN) {
			if (lcr & EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}
		*bits = ((lcr & WLS_MASK) >> WLS_OFFSET) + 5;

		/* Set DLAB in LCR to Access CLK */
		UART_SET_DLAB(uart);

		clk = UART_GET_CLK(uart);

		/* Clear DLAB in LCR to Access THR RBR IER */
		UART_CLEAR_DLAB(uart);

		/* NOTE(review): assumes clk is non-zero on an enabled
		 * port — a zero divisor would fault here. */
		*baud = get_sclk() / (16*clk);
	}
	pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __func__, *baud, *parity, *bits);
}
static struct uart_driver bfin_serial_reg;
/*
 * Console character output: busy-wait for the TX holding register to
 * empty, then write the character.
 */
static void bfin_serial_console_putchar(struct uart_port *port, int ch)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

	for (;;) {
		if (UART_GET_LSR(uart) & THRE)
			break;
		barrier();
	}
	UART_PUT_CHAR(uart, ch);
}
#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
defined (CONFIG_EARLY_PRINTK) */
#ifdef CONFIG_SERIAL_BFIN_CONSOLE
#define CLASS_BFIN_CONSOLE "bfin-console"
/*
* Interrupts are disabled on entering
*/
/*
 * Emit a console message on the port selected by co->index, serialised
 * against normal TX through the port lock.
 */
static void
bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
{
	struct bfin_serial_port *uart = bfin_serial_ports[co->index];
	unsigned long flags;

	spin_lock_irqsave(&uart->port.lock, flags);
	uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
	spin_unlock_irqrestore(&uart->port.lock, flags);
}
/*
 * Console setup: resolve the port for co->index, then take baud/parity
 * settings from the command-line options if given, otherwise probe the
 * hardware for whatever the boot loader configured.
 */
static int __init
bfin_serial_console_setup(struct console *co, char *options)
{
	struct bfin_serial_port *uart;
	int baud = 57600;
	int bits = 8;
	int parity = 'n';
# if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
	defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
	/* Default to RTS/CTS flow control when the driver supports it. */
	int flow = 'r';
# else
	int flow = 'n';
# endif

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index < 0 || co->index >= BFIN_UART_NR_PORTS)
		return -ENODEV;

	uart = bfin_serial_ports[co->index];
	if (!uart)
		return -ENODEV;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		bfin_serial_console_get_options(uart, &baud, &parity, &bits);

	return uart_set_options(&uart->port, co, baud, parity, bits, flow);
}
/* Regular (post-boot) serial console backed by bfin_serial_reg. */
static struct console bfin_serial_console = {
	.name		= BFIN_SERIAL_DEV_NAME,
	.write		= bfin_serial_console_write,
	.device		= uart_console_device,
	.setup		= bfin_serial_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &bfin_serial_reg,
};
#define BFIN_SERIAL_CONSOLE (&bfin_serial_console)
#else
#define BFIN_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_BFIN_CONSOLE */
#ifdef CONFIG_EARLY_PRINTK
static struct bfin_serial_port bfin_earlyprintk_port;
#define CLASS_BFIN_EARLYPRINTK "bfin-earlyprintk"
/*
* Interrupts are disabled on entering
*/
/*
 * Early-printk console output through the single static earlyprintk
 * port; silently ignored if the console index does not match the port
 * that was probed.
 */
static void
bfin_earlyprintk_console_write(struct console *co, const char *s, unsigned int count)
{
	unsigned long flags;

	if (bfin_earlyprintk_port.port.line != co->index)
		return;

	spin_lock_irqsave(&bfin_earlyprintk_port.port.lock, flags);
	uart_console_write(&bfin_earlyprintk_port.port, s, count,
		bfin_serial_console_putchar);
	spin_unlock_irqrestore(&bfin_earlyprintk_port.port.lock, flags);
}
/*
* This should have a .setup or .early_setup in it, but then things get called
* without the command line options, and the baud rate gets messed up - so
* don't let the common infrastructure play with things. (see calls to setup
* & earlysetup in ./kernel/printk.c:register_console()
*/
/* Write-only early console; deliberately has no .setup (see above). */
static struct __initdata console bfin_early_serial_console = {
	.name = "early_BFuart",
	.write = bfin_earlyprintk_console_write,
	.device = uart_console_device,
	.flags = CON_PRINTBUFFER,
	.index = -1,
	.data  = &bfin_serial_reg,
};
#endif
/* Serial-core driver registration record for all Blackfin UART ports. */
static struct uart_driver bfin_serial_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= DRIVER_NAME,
	.dev_name		= BFIN_SERIAL_DEV_NAME,
	.major			= BFIN_SERIAL_MAJOR,
	.minor			= BFIN_SERIAL_MINOR,
	.nr			= BFIN_UART_NR_PORTS,
	.cons			= BFIN_SERIAL_CONSOLE,
};
/* Suspend: hand the port to the serial core's generic suspend path. */
static int bfin_serial_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct bfin_serial_port *uart = platform_get_drvdata(pdev);

	return uart_suspend_port(&bfin_serial_reg, &uart->port);
}
/* Resume: hand the port to the serial core's generic resume path. */
static int bfin_serial_resume(struct platform_device *pdev)
{
	struct bfin_serial_port *uart = platform_get_drvdata(pdev);

	return uart_resume_port(&bfin_serial_reg, &uart->port);
}
/*
 * Platform probe: allocate and populate the per-port structure on
 * first probe of a given id (peripheral pins, MMIO mapping, IRQs,
 * DMA channels, CTS/RTS pins), then register the port with the serial
 * core (skipped for early platform devices, which register later).
 *
 * Note the unusual layout: the error labels live inside the trailing
 * "if (uart)" so that goto-based unwinding only runs when this call
 * actually allocated the structure.
 */
static int bfin_serial_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct bfin_serial_port *uart = NULL;
	int ret = 0;

	if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
		dev_err(&pdev->dev, "Wrong bfin uart platform device id.\n");
		return -ENOENT;
	}

	/* First probe for this id: build the port structure. */
	if (bfin_serial_ports[pdev->id] == NULL) {

		uart = kzalloc(sizeof(*uart), GFP_KERNEL);
		if (!uart) {
			dev_err(&pdev->dev,
				"fail to malloc bfin_serial_port\n");
			return -ENOMEM;
		}
		bfin_serial_ports[pdev->id] = uart;

#ifdef CONFIG_EARLY_PRINTK
		if (!(bfin_earlyprintk_port.port.membase
			&& bfin_earlyprintk_port.port.line == pdev->id)) {
			/*
			 * If the peripheral PINs of current port is allocated
			 * in earlyprintk probe stage, don't do it again.
			 */
#endif
		ret = peripheral_request_list(
			(unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev,
				"fail to request bfin serial peripherals\n");
			goto out_error_free_mem;
		}
#ifdef CONFIG_EARLY_PRINTK
		}
#endif

		spin_lock_init(&uart->port.lock);
		uart->port.uartclk   = get_sclk();
		uart->port.fifosize  = BFIN_UART_TX_FIFO_SIZE;
		uart->port.ops       = &bfin_serial_pops;
		uart->port.line      = pdev->id;
		uart->port.iotype    = UPIO_MEM;
		uart->port.flags     = UPF_BOOT_AUTOCONF;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (res == NULL) {
			dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
			ret = -ENOENT;
			goto out_error_free_peripherals;
		}

		uart->port.membase = ioremap(res->start, resource_size(res));
		if (!uart->port.membase) {
			dev_err(&pdev->dev, "Cannot map uart IO\n");
			ret = -ENXIO;
			goto out_error_free_peripherals;
		}
		uart->port.mapbase = res->start;

		/* IRQ resources by index: 0 = TX, 1 = RX, 2 = status. */
		uart->tx_irq = platform_get_irq(pdev, 0);
		if (uart->tx_irq < 0) {
			dev_err(&pdev->dev, "No uart TX IRQ specified\n");
			ret = -ENOENT;
			goto out_error_unmap;
		}

		uart->rx_irq = platform_get_irq(pdev, 1);
		if (uart->rx_irq < 0) {
			dev_err(&pdev->dev, "No uart RX IRQ specified\n");
			ret = -ENOENT;
			goto out_error_unmap;
		}
		uart->port.irq = uart->rx_irq;

		uart->status_irq = platform_get_irq(pdev, 2);
		if (uart->status_irq < 0) {
			dev_err(&pdev->dev, "No uart status IRQ specified\n");
			ret = -ENOENT;
			goto out_error_unmap;
		}

#ifdef CONFIG_SERIAL_BFIN_DMA
		spin_lock_init(&uart->rx_lock);
		uart->tx_done	    = 1;
		uart->tx_count	    = 0;

		/* DMA resources by index: 0 = TX channel, 1 = RX channel. */
		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (res == NULL) {
			dev_err(&pdev->dev, "No uart TX DMA channel specified\n");
			ret = -ENOENT;
			goto out_error_unmap;
		}
		uart->tx_dma_channel = res->start;

		res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
		if (res == NULL) {
			dev_err(&pdev->dev, "No uart RX DMA channel specified\n");
			ret = -ENOENT;
			goto out_error_unmap;
		}
		uart->rx_dma_channel = res->start;

		init_timer(&(uart->rx_dma_timer));
#endif

#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
	defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
		/* IO resources by index: 0 = CTS pin, 1 = RTS pin;
		 * -1 marks the pin as absent. */
		res = platform_get_resource(pdev, IORESOURCE_IO, 0);
		if (res == NULL)
			uart->cts_pin = -1;
		else {
			uart->cts_pin = res->start;
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
			uart->port.flags |= ASYNC_CTS_FLOW;
#endif
		}

		res = platform_get_resource(pdev, IORESOURCE_IO, 1);
		if (res == NULL)
			uart->rts_pin = -1;
		else
			uart->rts_pin = res->start;
#endif
	}

#ifdef CONFIG_SERIAL_BFIN_CONSOLE
	/* Early platform devices are registered with the serial core
	 * later, by the normal (non-early) probe pass. */
	if (!is_early_platform_device(pdev)) {
#endif
		uart = bfin_serial_ports[pdev->id];
		uart->port.dev = &pdev->dev;
		dev_set_drvdata(&pdev->dev, uart);
		ret = uart_add_one_port(&bfin_serial_reg, &uart->port);
#ifdef CONFIG_SERIAL_BFIN_CONSOLE
	}
#endif

	if (!ret)
		return 0;

	if (uart) {
out_error_unmap:
		iounmap(uart->port.membase);
out_error_free_peripherals:
		peripheral_free_list(
			(unsigned short *)pdev->dev.platform_data);
out_error_free_mem:
		kfree(uart);
		bfin_serial_ports[pdev->id] = NULL;
	}

	return ret;
}
/*
 * Platform remove: unregister the port from the serial core and
 * release everything probe allocated (MMIO mapping, peripheral pins,
 * the port structure itself).
 */
static int bfin_serial_remove(struct platform_device *pdev)
{
	struct bfin_serial_port *uart = platform_get_drvdata(pdev);

	dev_set_drvdata(&pdev->dev, NULL);

	if (!uart)
		return 0;

	uart_remove_one_port(&bfin_serial_reg, &uart->port);
	iounmap(uart->port.membase);
	peripheral_free_list(
		(unsigned short *)pdev->dev.platform_data);
	kfree(uart);
	bfin_serial_ports[pdev->id] = NULL;

	return 0;
}
/* Platform driver for the normal (non-earlyprintk) probe path. */
static struct platform_driver bfin_serial_driver = {
	.probe		= bfin_serial_probe,
	.remove		= bfin_serial_remove,
	.suspend	= bfin_serial_suspend,
	.resume		= bfin_serial_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
#if defined(CONFIG_SERIAL_BFIN_CONSOLE)
/* Early-platform wrapper so console ports can probe before initcalls. */
static __initdata struct early_platform_driver early_bfin_serial_driver = {
	.class_str = CLASS_BFIN_CONSOLE,
	.pdrv = &bfin_serial_driver,
	.requested_id = EARLY_PLATFORM_ID_UNSET,
};
/*
 * Console initcall: probe all early console ports and register the
 * regular serial console.
 */
static int __init bfin_serial_rs_console_init(void)
{
	early_platform_driver_register(&early_bfin_serial_driver, DRIVER_NAME);
	early_platform_driver_probe(CLASS_BFIN_CONSOLE, BFIN_UART_NR_PORTS, 0);

	register_console(&bfin_serial_console);

	return 0;
}
console_initcall(bfin_serial_rs_console_init);
#endif
#ifdef CONFIG_EARLY_PRINTK
/*
* Memory can't be allocated dynamically during earlyprink init stage.
* So, do individual probe for earlyprink with a static uart port variable.
*/
/*
 * Earlyprintk probe: set up the single static earlyprintk port
 * (peripheral pins + MMIO mapping + basic port fields).  No dynamic
 * allocation is possible this early, hence the static port variable.
 */
static int bfin_earlyprintk_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret;

	if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
		dev_err(&pdev->dev, "Wrong earlyprintk platform device id.\n");
		return -ENOENT;
	}

	ret = peripheral_request_list(
		(unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev,
			"fail to request bfin serial peripherals\n");
			return ret;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
		ret = -ENOENT;
		goto out_error_free_peripherals;
	}

	bfin_earlyprintk_port.port.membase = ioremap(res->start,
		resource_size(res));
	if (!bfin_earlyprintk_port.port.membase) {
		dev_err(&pdev->dev, "Cannot map uart IO\n");
		ret = -ENXIO;
		goto out_error_free_peripherals;
	}
	bfin_earlyprintk_port.port.mapbase = res->start;
	bfin_earlyprintk_port.port.line = pdev->id;
	bfin_earlyprintk_port.port.uartclk = get_sclk();
	bfin_earlyprintk_port.port.fifosize  = BFIN_UART_TX_FIFO_SIZE;
	spin_lock_init(&bfin_earlyprintk_port.port.lock);

	return 0;

out_error_free_peripherals:
	peripheral_free_list(
		(unsigned short *)pdev->dev.platform_data);

	return ret;
}
/* Probe-only platform driver for the earlyprintk port. */
static struct platform_driver bfin_earlyprintk_driver = {
	.probe		= bfin_earlyprintk_probe,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
/* Early-platform wrapper used by bfin_earlyserial_init(). */
static __initdata struct early_platform_driver early_bfin_earlyprintk_driver = {
	.class_str = CLASS_BFIN_EARLYPRINTK,
	.pdrv = &bfin_earlyprintk_driver,
	.requested_id = EARLY_PLATFORM_ID_UNSET,
};
/*
 * Set up the static earlyprintk port for the given uart index and
 * return the early console backing it.  Returns NULL if the index is
 * out of range or the port's MMIO could not be mapped.
 *
 * Fixes over the previous version: 'port' is unsigned, so the old
 * 'port < 0' test could never fire and is dropped; the temporary
 * ktermios is now fully zero-initialized (designated initializer)
 * instead of leaving unset fields indeterminate when passed to
 * bfin_serial_set_termios().
 */
struct console __init *bfin_earlyserial_init(unsigned int port,
						unsigned int cflag)
{
	struct ktermios t = {
		.c_cflag = cflag,
		.c_lflag = ICANON,
		.c_line = port,
	};
	char port_name[20];

	if (port >= BFIN_UART_NR_PORTS)
		return NULL;

	/*
	 * Only probe resource of the given port in earlyprintk boot arg.
	 * The expected port id should be indicated in port name string.
	 */
	snprintf(port_name, 20, DRIVER_NAME ".%d", port);
	early_platform_driver_register(&early_bfin_earlyprintk_driver,
		port_name);
	early_platform_driver_probe(CLASS_BFIN_EARLYPRINTK, 1, 0);

	if (!bfin_earlyprintk_port.port.membase)
		return NULL;

#ifdef CONFIG_SERIAL_BFIN_CONSOLE
	/*
	 * If we are using early serial, don't let the normal console rewind
	 * log buffer, since that causes things to be printed multiple times
	 */
	bfin_serial_console.flags &= ~CON_PRINTBUFFER;
#endif

	bfin_early_serial_console.index = port;
	bfin_serial_set_termios(&bfin_earlyprintk_port.port, &t, &t);

	return &bfin_early_serial_console;
}
#endif /* CONFIG_EARLY_PRINTK */
/*
 * Module init: register the uart driver with the serial core, then the
 * platform driver.  On platform-driver failure the uart driver is
 * unregistered again.
 *
 * Fix over the previous version: a uart_register_driver() failure fell
 * through to platform_driver_register() anyway — potentially returning
 * success with no uart driver registered, and unregistering a driver
 * that was never registered on the subsequent error path.  Now we
 * return the error immediately.
 */
static int __init bfin_serial_init(void)
{
	int ret;

	pr_info("Blackfin serial driver\n");

	ret = uart_register_driver(&bfin_serial_reg);
	if (ret) {
		pr_err("failed to register %s:%d\n",
			bfin_serial_reg.driver_name, ret);
		return ret;
	}

	ret = platform_driver_register(&bfin_serial_driver);
	if (ret) {
		pr_err("fail to register bfin uart\n");
		uart_unregister_driver(&bfin_serial_reg);
	}

	return ret;
}
/* Module exit: unregister in reverse order of registration. */
static void __exit bfin_serial_exit(void)
{
	platform_driver_unregister(&bfin_serial_driver);
	uart_unregister_driver(&bfin_serial_reg);
}
module_init(bfin_serial_init);
module_exit(bfin_serial_exit);
MODULE_AUTHOR("Sonic Zhang, Aubrey Li");
MODULE_DESCRIPTION("Blackfin generic serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(BFIN_SERIAL_MAJOR);
MODULE_ALIAS("platform:bfin-uart");
| gpl-2.0 |
skelton/amlogic_common_3050 | arch/frv/mb93090-mb00/pci-vdk.c | 3905 | 12741 | /* pci-vdk.c: MB93090-MB00 (VDK) PCI support
*
* Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/mb-regs.h>
#include <asm/mb86943a.h>
#include "pci-frv.h"
/* Global PCI probing state for the FRV VDK board. */
unsigned int __nongpreldata pci_probe = 1;	/* non-zero: PCI probing enabled */

int __nongpreldata pcibios_last_bus = -1;	/* highest bus number, -1 if unknown */
struct pci_bus *__nongpreldata pci_root_bus;	/* the scanned root bus */
struct pci_ops *__nongpreldata pci_root_ops;	/* config-space access methods in use */
/*
* The accessible PCI window does not cover the entire CPU address space, but
* there are devices we want to access outside of that window, so we need to
* insert specific PCI bus resources instead of using the platform-level bus
* resources directly for the PCI root bus.
*
* These are configured and inserted by pcibios_init() and are attached to the
* root bus by pcibios_fixup_bus().
*/
/* Root-bus I/O port window (see the windowing note above). */
static struct resource pci_ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};

/* Root-bus memory window; end = -1 covers the full address space. */
static struct resource pci_iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
/*
* Functions for accessing PCI configuration space
*/
#define CONFIG_CMD(bus, dev, where) \
(0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3))
#define __set_PciCfgAddr(A) writel((A), (volatile void __iomem *) __region_CS1 + 0x80)
#define __get_PciCfgDataB(A) readb((volatile void __iomem *) __region_CS1 + 0x88 + ((A) & 3))
#define __get_PciCfgDataW(A) readw((volatile void __iomem *) __region_CS1 + 0x88 + ((A) & 2))
#define __get_PciCfgDataL(A) readl((volatile void __iomem *) __region_CS1 + 0x88)
#define __set_PciCfgDataB(A,V) \
writeb((V), (volatile void __iomem *) __region_CS1 + 0x88 + (3 - ((A) & 3)))
#define __set_PciCfgDataW(A,V) \
writew((V), (volatile void __iomem *) __region_CS1 + 0x88 + (2 - ((A) & 2)))
#define __set_PciCfgDataL(A,V) \
writel((V), (volatile void __iomem *) __region_CS1 + 0x88)
#define __get_PciBridgeDataB(A) readb((volatile void __iomem *) __region_CS1 + 0x800 + (A))
#define __get_PciBridgeDataW(A) readw((volatile void __iomem *) __region_CS1 + 0x800 + (A))
#define __get_PciBridgeDataL(A) readl((volatile void __iomem *) __region_CS1 + 0x800 + (A))
#define __set_PciBridgeDataB(A,V) writeb((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A))
#define __set_PciBridgeDataW(A,V) writew((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A))
#define __set_PciBridgeDataL(A,V) writel((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A))
/*
 * Debug hook for selecting which devices' config accesses to trace;
 * currently matches no device at all.
 */
static inline int __query(const struct pci_dev *dev)
{
	return 0;
}
/*****************************************************************************/
/*
*
*/
/*
 * Read PCI configuration space.  Device 00:00.0 (the host bridge) is
 * accessed through its dedicated register window; everything else goes
 * through the config address/data mechanism.  A full 32-bit dword is
 * always fetched and then shifted down to the requested byte/word.
 */
static int pci_frv_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
			       u32 *val)
{
	u32 _value;

	if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) {
		_value = __get_PciBridgeDataL(where & ~3);
	}
	else {
		__set_PciCfgAddr(CONFIG_CMD(bus, devfn, where));
		_value = __get_PciCfgDataL(where & ~3);
	}

	switch (size) {
	case 1:
		_value = _value >> ((where & 3) * 8);
		break;

	case 2:
		_value = _value >> ((where & 2) * 8);
		break;

	case 4:
		break;

	default:
		BUG();
	}

	/* NOTE(review): sub-dword reads are shifted but not masked to
	 * 'size' bytes — callers appear to rely on truncation at the
	 * use site; confirm. */
	*val = _value;
	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write PCI configuration space.  Mirrors pci_frv_read_config(): the
 * host bridge (00:00.0) uses its own register window, other devices go
 * through the config address/data pair, with byte/word/dword variants
 * selected by 'size'.
 */
static int pci_frv_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
				u32 value)
{
	switch (size) {
	case 1:
		if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) {
			__set_PciBridgeDataB(where, value);
		}
		else {
			__set_PciCfgAddr(CONFIG_CMD(bus, devfn, where));
			__set_PciCfgDataB(where, value);
		}
		break;

	case 2:
		if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) {
			__set_PciBridgeDataW(where, value);
		}
		else {
			__set_PciCfgAddr(CONFIG_CMD(bus, devfn, where));
			__set_PciCfgDataW(where, value);
		}
		break;

	case 4:
		if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) {
			__set_PciBridgeDataL(where, value);
		}
		else {
			__set_PciCfgAddr(CONFIG_CMD(bus, devfn, where));
			__set_PciCfgDataL(where, value);
		}
		break;

	default:
		BUG();
	}

	return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops pci_direct_frv = {
pci_frv_read_config,
pci_frv_write_config,
};
/*
* Before we decide to use direct hardware access mechanisms, we try to do some
* trivial checks to ensure it at least _seems_ to be working -- we just test
* whether bus 00 contains a host bridge (this is similar to checking
* techniques used in XFree86, but ours should be more reliable since we
* attempt to make use of direct access hints provided by the PCI BIOS).
*
* This should be close to trivial, but it isn't, because there are buggy
* chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
*/
/*
 * Sanity-check direct config access by reading the vendor/device ID of
 * the VDK host bridge through the supplied ops.  Returns 1 when the
 * expected ID (0x200e10cf) is found, 0 otherwise.
 */
static int __init pci_sanity_check(struct pci_ops *o)
{
	struct pci_bus bus;	/* fake bus; only .number is consumed */
	u32 id;

	bus.number = 0;

	if (o->read(&bus, 0, PCI_VENDOR_ID, 4, &id) == PCIBIOS_SUCCESSFUL) {
		printk("PCI: VDK Bridge device:vendor: %08x\n", id);
		if (id == 0x200e10cf)
			return 1;
	}

	printk("PCI: VDK Bridge: Sanity check failed\n");
	return 0;
}
/*
 * Probe for working direct hardware access to PCI config space.
 * Returns the ops vector to use, or NULL if the sanity check failed.
 * Interrupts are masked across the probe.
 */
static struct pci_ops * __init pci_check_direct(void)
{
	struct pci_ops *ops = NULL;
	unsigned long flags;

	local_irq_save(flags);

	if (pci_sanity_check(&pci_direct_frv))
		ops = &pci_direct_frv;

	local_irq_restore(flags);

	if (ops)
		printk("PCI: Using configuration frv\n");

	return ops;
}
/*
* Discover remaining PCI buses in case there are peer host bridges.
* We use the number of last PCI bus provided by the PCI BIOS.
*/
/*
 * Discover PCI buses behind peer host bridges that the normal scan of
 * bus 0 cannot reach.  Walks bus numbers up to pcibios_last_bus (set
 * via the "lastbus=" option); for each bus not already known, a config
 * read of every slot's vendor ID is attempted through a fake, on-stack
 * pci_dev/pci_bus pair, and the first responding slot triggers a full
 * pci_scan_bus() of that bus.
 *
 * NOTE: the stack-allocated pci_dev/pci_bus are only partially
 * initialized — pci_read_config_word() must touch nothing beyond
 * dev.bus, dev.devfn, bus.number and bus.ops for this to be safe.
 */
static void __init pcibios_fixup_peer_bridges(void)
{
	struct pci_bus bus;	/* fake bus for probing */
	struct pci_dev dev;	/* fake device for probing */
	int n;
	u16 l;

	if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff)
		return;

	printk("PCI: Peer bridge fixup\n");

	for (n=0; n <= pcibios_last_bus; n++) {
		if (pci_find_bus(0, n))
			continue;	/* already discovered */
		bus.number = n;
		bus.ops = pci_root_ops;
		dev.bus = &bus;
		/* step devfn in units of 8: function 0 of each slot */
		for(dev.devfn=0; dev.devfn<256; dev.devfn += 8)
			if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) &&
			    l != 0x0000 && l != 0xffff) {
				printk("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l);
				printk("PCI: Discovered peer bus %02x\n", n);
				pci_scan_bus(n, pci_root_ops, NULL);
				break;
			}
	}
}
/*
* Exceptions for specific devices. Usually work-arounds for fatal design flaws.
*/
/*
 * The UM8886BF IDE controller sets its BAR type bits incorrectly so the
 * regions look like memory when they are really I/O ports; force the
 * I/O-space flag on the first four BARs.
 */
static void __init pci_fixup_umc_ide(struct pci_dev *d)
{
	int bar;

	printk("PCI: Fixing base address flags for device %s\n", pci_name(d));
	for (bar = 0; bar < 4; bar++)
		d->resource[bar].flags |= PCI_BASE_ADDRESS_SPACE_IO;
}
/*
 * PCI IDE controllers use non-standard I/O port decoding; a BAR whose
 * base (ignoring bit 7) is 0x374 really decodes the single secondary
 * control port at 0x376, so nudge the range to match.
 */
static void __init pci_fixup_ide_bases(struct pci_dev *d)
{
	int bar;

	/* only IDE-class devices use this decoding */
	if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
		return;

	printk("PCI: IDE base address fixup for %s\n", pci_name(d));
	for (bar = 0; bar < 4; bar++) {
		struct resource *res = &d->resource[bar];

		if ((res->start & ~0x80) != 0x374)
			continue;
		res->start |= 2;
		res->end = res->start;
	}
}
/*
 * Some PCI IDE controllers report utter garbage in their first four
 * base registers; clear them so the resource code ignores the ranges.
 */
static void __init pci_fixup_ide_trash(struct pci_dev *d)
{
	struct resource *res = d->resource;
	int bar;

	printk("PCI: IDE base address trash cleared for %s\n", pci_name(d));
	for (bar = 0; bar < 4; bar++, res++) {
		res->start = 0;
		res->end = 0;
		res->flags = 0;
	}
}
/* Cap the global PCI latency timer for SiS 5597/5598 chipsets. */
static void __devinit pci_fixup_latency(struct pci_dev *d)
{
	/*
	 * SiS 5597 and 5598 chipsets require latency timer set to
	 * at most 32 to avoid lockups.
	 */
	DBG("PCI: Setting max latency to 32\n");
	pcibios_max_latency = 32;
}
/* Quirk registrations: applied by the PCI core at header-fixup time to
 * devices matching the given vendor/device IDs (PCI_ANY_ID = wildcard,
 * so pci_fixup_ide_bases runs for every device and filters by class). */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, pci_fixup_ide_trash);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency);
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
/*
* Called after each bus is probed, but before its children
* are examined.
*/
/*
 * Called after each bus is probed, but before its children are
 * examined.  Attaches the platform I/O and memory windows to the root
 * bus and clears the host bridge's spurious BAR0.
 */
void __init pcibios_fixup_bus(struct pci_bus *bus)
{
	int is_root = (bus->number == 0);

	if (is_root) {
		bus->resource[0] = &pci_ioport_resource;
		bus->resource[1] = &pci_iomem_resource;
	}

	pci_read_bridge_bases(bus);

	if (is_root) {
		struct list_head *node;
		struct pci_dev *dev;

		/* wipe BAR0 of the host bridge (devfn 0) itself */
		for (node = bus->devices.next; node != &bus->devices;
		     node = node->next) {
			dev = pci_dev_b(node);
			if (dev->devfn != 0)
				continue;
			dev->resource[0].start = 0;
			dev->resource[0].end = 0;
		}
	}
}
/*
* Initialization. Try all known PCI access methods. Note that we support
* using both PCI BIOS and direct access: in such cases, we use I/O ports
* to access config space, but we still keep BIOS order of cards to be
* compatible with 2.0.X. This should go away some day.
*/
/*
 * Arch PCI initialization: program the MB86943 CPU<->PCI bridge windows,
 * publish the resulting I/O and memory apertures as resources, then probe
 * for working config access and scan the bus.
 *
 * Returns 0 on success, -ENXIO if the motherboard/bridge is absent,
 * probing is disabled, or no bus is detected.
 */
int __init pcibios_init(void)
{
	struct pci_ops *dir = NULL;

	/* the PCI bridge lives on the MB93090-MB00 motherboard */
	if (!mb93090_mb00_detected)
		return -ENXIO;

	/* NOTE(review): per the macro names this enables byte-swapping for
	 * direct master/slave transfers — confirm against MB86943 docs */
	__reg_MB86943_sl_ctl |= MB86943_SL_CTL_DRCT_MASTER_SWAP | MB86943_SL_CTL_DRCT_SLAVE_SWAP;

	/* external chip-select bases (addresses appear to be programmed in
	 * 512-byte units, hence the ">> 9" — verify against the datasheet) */
	__reg_MB86943_ecs_base(1) = ((__region_CS2 + 0x01000000) >> 9) | 0x08000000;
	__reg_MB86943_ecs_base(2) = ((__region_CS2 + 0x00000000) >> 9) | 0x08000000;

	/* NOTE(review): raw pokes into the CS1 bridge register window at
	 * offsets 0x848/0x8b8 — purpose not derivable from here; consult
	 * the MB86943 datasheet before touching */
	*(volatile uint32_t *) (__region_CS1 + 0x848) = 0xe0000000;
	*(volatile uint32_t *) (__region_CS1 + 0x8b8) = 0x00000000;

	/* program the CPU->PCI and PCI->CPU I/O and memory window bases */
	__reg_MB86943_sl_pci_io_base = (__region_CS2 + 0x04000000) >> 9;
	__reg_MB86943_sl_pci_mem_base = (__region_CS2 + 0x08000000) >> 9;
	__reg_MB86943_pci_sl_io_base = __region_CS2 + 0x04000000;
	__reg_MB86943_pci_sl_mem_base = __region_CS2 + 0x08000000;
	mb();

	/* enable PCI arbitration */
	__reg_MB86943_pci_arbiter = MB86943_PCIARB_EN;

	/* derive the I/O port aperture from the programmed registers */
	pci_ioport_resource.start = (__reg_MB86943_sl_pci_io_base << 9) & 0xfffffc00;
	pci_ioport_resource.end = (__reg_MB86943_sl_pci_io_range << 9) | 0x3ff;
	pci_ioport_resource.end += pci_ioport_resource.start;

	printk("PCI IO window: %08llx-%08llx\n",
	       (unsigned long long) pci_ioport_resource.start,
	       (unsigned long long) pci_ioport_resource.end);

	/* derive the memory aperture the same way */
	pci_iomem_resource.start = (__reg_MB86943_sl_pci_mem_base << 9) & 0xfffffc00;
	pci_iomem_resource.end = (__reg_MB86943_sl_pci_mem_range << 9) | 0x3ff;
	pci_iomem_resource.end += pci_iomem_resource.start;

	/* Reserve somewhere to write to flush posted writes. This is used by
	 * __flush_PCI_writes() from asm/io.h to force the write FIFO in the
	 * CPU-PCI bridge to flush as this doesn't happen automatically when a
	 * read is performed on the MB93090 development kit motherboard.
	 */
	pci_iomem_resource.start += 0x400;

	printk("PCI MEM window: %08llx-%08llx\n",
	       (unsigned long long) pci_iomem_resource.start,
	       (unsigned long long) pci_iomem_resource.end);
	printk("PCI DMA memory: %08lx-%08lx\n",
	       dma_coherent_mem_start, dma_coherent_mem_end);

	if (insert_resource(&iomem_resource, &pci_iomem_resource) < 0)
		panic("Unable to insert PCI IOMEM resource\n");
	if (insert_resource(&ioport_resource, &pci_ioport_resource) < 0)
		panic("Unable to insert PCI IOPORT resource\n");

	/* probing may have been disabled via pcibios_setup("off") */
	if (!pci_probe)
		return -ENXIO;

	dir = pci_check_direct();
	if (dir)
		pci_root_ops = dir;
	else {
		printk("PCI: No PCI bus detected\n");
		return -ENXIO;
	}

	printk("PCI: Probing PCI hardware\n");
	pci_root_bus = pci_scan_bus(0, pci_root_ops, NULL);

	pcibios_irq_init();
	pcibios_fixup_peer_bridges();
	pcibios_fixup_irqs();
	pcibios_resource_survey();

	return 0;
}
arch_initcall(pcibios_init);
/*
 * Handle arch-specific PCI command-line options: "off" disables probing
 * entirely, "lastbus=N" caps the peer-bridge scan.  Returns NULL when
 * the option was consumed, otherwise the unrecognized string.
 */
char * __init pcibios_setup(char *str)
{
	if (strcmp(str, "off") == 0) {
		pci_probe = 0;
		return NULL;
	}

	if (strncmp(str, "lastbus=", 8) == 0) {
		pcibios_last_bus = simple_strtol(str + 8, NULL, 0);
		return NULL;
	}

	return str;
}
/*
 * Enable a device's resources and, unless it is using MSI, route its
 * legacy interrupt.  Returns 0 on success or the pci_enable_resources()
 * error code.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int err = pci_enable_resources(dev, mask);

	if (err < 0)
		return err;

	if (!dev->msi_enabled)
		pcibios_enable_irq(dev);

	return 0;
}
| gpl-2.0 |
jgcaap/boeffla | drivers/scsi/scsi_netlink.c | 4929 | 16136 | /*
* scsi_netlink.c - SCSI Transport Netlink Interface
*
* Copyright (C) 2006 James Smart, Emulex Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/security.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <scsi/scsi_netlink.h>
#include "scsi_priv.h"
struct sock *scsi_nl_sock = NULL;
EXPORT_SYMBOL_GPL(scsi_nl_sock);
static DEFINE_SPINLOCK(scsi_nl_lock);
static struct list_head scsi_nl_drivers;
static u32 scsi_nl_state;
#define STATE_EHANDLER_BSY 0x00000001
/*
 * Per-transport registration record, one slot per SCSI transport that
 * registered via scsi_nl_add_transport().
 */
struct scsi_nl_transport {
	int (*msg_handler)(struct sk_buff *);	/* netlink message callback */
	void (*event_handler)(struct notifier_block *, unsigned long, void *);
						/* netlink event callback */
	unsigned int refcnt;	/* in-flight handler invocations */
	int flags;		/* HANDLER_DELETING while unregistering */
};
/* flags values (bit flags) */
#define HANDLER_DELETING 0x1
/* registration table; all-NULL entries are free (guarded by scsi_nl_lock) */
static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] =
	{ {NULL, }, };
/*
 * Per-driver registration record for vendor-unique netlink messages,
 * linked on scsi_nl_drivers and matched by vendor_id.
 */
struct scsi_nl_drvr {
	struct list_head next;		/* link on scsi_nl_drivers list */
	int (*dmsg_handler)(struct Scsi_Host *shost, void *payload,
				 u32 len, u32 pid);	/* vendor message callback */
	void (*devt_handler)(struct notifier_block *nb,
				 unsigned long event, void *notify_ptr);
						/* netlink event callback */
	struct scsi_host_template *hostt;	/* verifies shost ownership */
	u64 vendor_id;		/* unique key used for lookups */
	unsigned int refcnt;	/* in-flight handler invocations */
	int flags;		/* HANDLER_DELETING while unregistering */
};
/**
* scsi_nl_rcv_msg - Receive message handler.
* @skb: socket receive buffer
*
* Description: Extracts message from a receive buffer.
* Validates message header and calls appropriate transport message handler
*
*
**/
static void
scsi_nl_rcv_msg(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	struct scsi_nl_hdr *hdr;
	unsigned long flags;
	u32 rlen;
	int err, tport;

	/* one skb may carry several netlink messages; walk them all */
	while (skb->len >= NLMSG_SPACE(0)) {
		err = 0;

		nlh = nlmsg_hdr(skb);
		/* must hold at least a netlink header plus a scsi_nl header */
		if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) ||
		    (skb->len < nlh->nlmsg_len)) {
			printk(KERN_WARNING "%s: discarding partial skb\n",
				__func__);
			return;
		}

		/* bytes consumed by this message (clamped to the skb) */
		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (rlen > skb->len)
			rlen = skb->len;

		if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
			err = -EBADMSG;
			goto next_msg;
		}

		/* validate the SCSI netlink header version/magic */
		hdr = NLMSG_DATA(nlh);
		if ((hdr->version != SCSI_NL_VERSION) ||
		    (hdr->magic != SCSI_NL_MAGIC)) {
			err = -EPROTOTYPE;
			goto next_msg;
		}

		/* only privileged senders may talk to the transports */
		if (!capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto next_msg;
		}

		if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
			printk(KERN_WARNING "%s: discarding partial message\n",
				__func__);
			goto next_msg;
		}

		/*
		 * Deliver message to the appropriate transport
		 */
		spin_lock_irqsave(&scsi_nl_lock, flags);

		tport = hdr->transport;
		if ((tport < SCSI_NL_MAX_TRANSPORTS) &&
		    !(transports[tport].flags & HANDLER_DELETING) &&
		    (transports[tport].msg_handler)) {
			/* bump refcnt so the handler cannot be unregistered
			 * while we call it with the lock dropped */
			transports[tport].refcnt++;
			spin_unlock_irqrestore(&scsi_nl_lock, flags);
			err = transports[tport].msg_handler(skb);
			spin_lock_irqsave(&scsi_nl_lock, flags);
			transports[tport].refcnt--;
		} else
			err = -ENOENT;

		spin_unlock_irqrestore(&scsi_nl_lock, flags);

next_msg:
		/* ack on error or when the sender asked for one */
		if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
			netlink_ack(skb, nlh, err);

		skb_pull(skb, rlen);
	}
}
/**
* scsi_nl_rcv_event - Event handler for a netlink socket.
* @this: event notifier block
* @event: event type
* @ptr: event payload
*
**/
static int
scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;
	struct scsi_nl_drvr *driver;
	unsigned long flags;
	int tport;

	/* only care about our own netlink protocol's events */
	if (n->protocol != NETLINK_SCSITRANSPORT)
		return NOTIFY_DONE;

	spin_lock_irqsave(&scsi_nl_lock, flags);
	/* flag busy so registration paths wait for us (see the
	 * STATE_EHANDLER_BSY checks in the add/remove routines) */
	scsi_nl_state |= STATE_EHANDLER_BSY;

	/*
	 * Pass event on to any transports that may be listening
	 */
	for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) {
		if (!(transports[tport].flags & HANDLER_DELETING) &&
		    (transports[tport].event_handler)) {
			/* drop the lock while calling out; the BSY flag
			 * keeps the table stable in the meantime */
			spin_unlock_irqrestore(&scsi_nl_lock, flags);
			transports[tport].event_handler(this, event, ptr);
			spin_lock_irqsave(&scsi_nl_lock, flags);
		}
	}

	/*
	 * Pass event on to any drivers that may be listening
	 */
	list_for_each_entry(driver, &scsi_nl_drivers, next) {
		if (!(driver->flags & HANDLER_DELETING) &&
		    (driver->devt_handler)) {
			spin_unlock_irqrestore(&scsi_nl_lock, flags);
			driver->devt_handler(this, event, ptr);
			spin_lock_irqsave(&scsi_nl_lock, flags);
		}
	}

	scsi_nl_state &= ~STATE_EHANDLER_BSY;
	spin_unlock_irqrestore(&scsi_nl_lock, flags);

	return NOTIFY_DONE;
}
/* netlink notifier registered in scsi_netlink_init() */
static struct notifier_block scsi_netlink_notifier = {
	.notifier_call  = scsi_nl_rcv_event,
};
/*
* GENERIC SCSI transport receive and event handlers
*/
/**
* scsi_generic_msg_handler - receive message handler for GENERIC transport messages
* @skb: socket receive buffer
**/
static int
scsi_generic_msg_handler(struct sk_buff *skb)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);
	struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);
	struct scsi_nl_drvr *driver;
	struct Scsi_Host *shost;
	unsigned long flags;
	int err = 0, match, pid;

	/* pid of the sending process, used to address any reply */
	pid = NETLINK_CREDS(skb)->pid;

	switch (snlh->msgtype) {
	case SCSI_NL_SHOST_VENDOR:
		{
		struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh);

		/* Locate the driver that corresponds to the message */
		spin_lock_irqsave(&scsi_nl_lock, flags);
		match = 0;
		list_for_each_entry(driver, &scsi_nl_drivers, next) {
			if (driver->vendor_id == msg->vendor_id) {
				match = 1;
				break;
			}
		}

		if ((!match) || (!driver->dmsg_handler)) {
			spin_unlock_irqrestore(&scsi_nl_lock, flags);
			err = -ESRCH;
			goto rcv_exit;
		}

		if (driver->flags & HANDLER_DELETING) {
			spin_unlock_irqrestore(&scsi_nl_lock, flags);
			err = -ESHUTDOWN;
			goto rcv_exit;
		}

		/* pin the registration while the handler runs unlocked */
		driver->refcnt++;
		spin_unlock_irqrestore(&scsi_nl_lock, flags);

		/* if successful, scsi_host_lookup takes a shost reference */
		shost = scsi_host_lookup(msg->host_no);
		if (!shost) {
			err = -ENODEV;
			goto driver_exit;
		}

		/* is this host owned by the vendor ? */
		if (shost->hostt != driver->hostt) {
			err = -EINVAL;
			goto vendormsg_put;
		}

		/* pass message on to the driver; &msg[1] is the vendor
		 * payload immediately following the fixed header */
		err = driver->dmsg_handler(shost, (void *)&msg[1],
					 msg->vmsg_datalen, pid);

vendormsg_put:
		/* release reference by scsi_host_lookup */
		scsi_host_put(shost);

driver_exit:
		/* release our own reference on the registration object */
		spin_lock_irqsave(&scsi_nl_lock, flags);
		driver->refcnt--;
		spin_unlock_irqrestore(&scsi_nl_lock, flags);
		break;
		}

	default:
		err = -EBADR;
		break;
	}

rcv_exit:
	if (err)
		printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
		       __func__, snlh->msgtype, err);
	return err;
}
/**
* scsi_nl_add_transport -
* Registers message and event handlers for a transport. Enables
* receipt of netlink messages and events to a transport.
*
* @tport: transport registering handlers
* @msg_handler: receive message handler callback
* @event_handler: receive event handler callback
**/
/**
 * scsi_nl_add_transport -
 *	Registers message and event handlers for a transport. Enables
 *	receipt of netlink messages and events to a transport.
 *
 * @tport:		transport registering handlers
 * @msg_handler:	receive message handler callback
 * @event_handler:	receive event handler callback
 *
 * Returns 0 on success, -EINVAL for an out-of-range transport id,
 * -EALREADY if the slot is already registered.
 **/
int
scsi_nl_add_transport(u8 tport,
	int (*msg_handler)(struct sk_buff *),
	void (*event_handler)(struct notifier_block *, unsigned long, void *))
{
	unsigned long flags;
	int err = 0;

	if (tport >= SCSI_NL_MAX_TRANSPORTS)
		return -EINVAL;

	spin_lock_irqsave(&scsi_nl_lock, flags);

	/*
	 * Wait until the event handler has finished walking the transport
	 * table.  A single "if" check (as before) could resume while the
	 * handler was still busy, because STATE_EHANDLER_BSY may still be
	 * set when the lock is re-taken after msleep() — so re-check in a
	 * loop.
	 */
	while (scsi_nl_state & STATE_EHANDLER_BSY) {
		spin_unlock_irqrestore(&scsi_nl_lock, flags);
		msleep(1);
		spin_lock_irqsave(&scsi_nl_lock, flags);
	}

	if (transports[tport].msg_handler || transports[tport].event_handler) {
		err = -EALREADY;
		goto register_out;
	}

	transports[tport].msg_handler = msg_handler;
	transports[tport].event_handler = event_handler;
	transports[tport].flags = 0;
	transports[tport].refcnt = 0;

register_out:
	spin_unlock_irqrestore(&scsi_nl_lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(scsi_nl_add_transport);
/**
* scsi_nl_remove_transport -
* Disable transport receiption of messages and events
*
* @tport: transport deregistering handlers
*
**/
/**
 * scsi_nl_remove_transport -
 *	Disable transport receiption of messages and events
 *
 * @tport:		transport deregistering handlers
 *
 * Marks the slot HANDLER_DELETING, waits for in-flight handler calls to
 * drain (refcnt == 0), then clears the registration.
 **/
void
scsi_nl_remove_transport(u8 tport)
{
	unsigned long flags;

	spin_lock_irqsave(&scsi_nl_lock, flags);

	/*
	 * Wait for the event handler to finish.  Re-check in a loop: the
	 * previous single "if" could proceed while STATE_EHANDLER_BSY was
	 * set again by the time the lock was re-acquired after msleep().
	 */
	while (scsi_nl_state & STATE_EHANDLER_BSY) {
		spin_unlock_irqrestore(&scsi_nl_lock, flags);
		msleep(1);
		spin_lock_irqsave(&scsi_nl_lock, flags);
	}

	if (tport < SCSI_NL_MAX_TRANSPORTS) {
		transports[tport].flags |= HANDLER_DELETING;

		/* drain any in-flight message handler calls */
		while (transports[tport].refcnt != 0) {
			spin_unlock_irqrestore(&scsi_nl_lock, flags);
			schedule_timeout_uninterruptible(HZ/4);
			spin_lock_irqsave(&scsi_nl_lock, flags);
		}
		transports[tport].msg_handler = NULL;
		transports[tport].event_handler = NULL;
		transports[tport].flags = 0;
		transports[tport].refcnt = 0;
	}

	spin_unlock_irqrestore(&scsi_nl_lock, flags);
}
EXPORT_SYMBOL_GPL(scsi_nl_remove_transport);
/**
* scsi_nl_add_driver -
* A driver is registering its interfaces for SCSI netlink messages
*
* @vendor_id: A unique identification value for the driver.
* @hostt: address of the driver's host template. Used
* to verify an shost is bound to the driver
* @nlmsg_handler: receive message handler callback
* @nlevt_handler: receive event handler callback
*
* Returns:
* 0 on Success
* error result otherwise
**/
/**
 * scsi_nl_add_driver -
 *	A driver is registering its interfaces for SCSI netlink messages
 *
 * @vendor_id:          A unique identification value for the driver.
 * @hostt:		address of the driver's host template. Used
 *			to verify an shost is bound to the driver
 * @nlmsg_handler:	receive message handler callback
 * @nlevt_handler:	receive event handler callback
 *
 * Returns:
 *   0 on Success
 *   error result otherwise
 **/
int
scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
	int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
				 u32 len, u32 pid),
	void (*nlevt_handler)(struct notifier_block *nb,
				 unsigned long event, void *notify_ptr))
{
	struct scsi_nl_drvr *driver;
	unsigned long flags;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (unlikely(!driver)) {
		printk(KERN_ERR "%s: allocation failure\n", __func__);
		return -ENOMEM;
	}

	driver->dmsg_handler = nlmsg_handler;
	driver->devt_handler = nlevt_handler;
	driver->hostt = hostt;
	driver->vendor_id = vendor_id;

	spin_lock_irqsave(&scsi_nl_lock, flags);

	/*
	 * Wait for the event handler to finish walking the driver list.
	 * Re-check in a loop: the previous single "if" could insert the
	 * entry while STATE_EHANDLER_BSY was set again after msleep().
	 */
	while (scsi_nl_state & STATE_EHANDLER_BSY) {
		spin_unlock_irqrestore(&scsi_nl_lock, flags);
		msleep(1);
		spin_lock_irqsave(&scsi_nl_lock, flags);
	}

	list_add_tail(&driver->next, &scsi_nl_drivers);
	spin_unlock_irqrestore(&scsi_nl_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_nl_add_driver);
/**
* scsi_nl_remove_driver -
* An driver is unregistering with the SCSI netlink messages
*
* @vendor_id: The unique identification value for the driver.
**/
/**
 * scsi_nl_remove_driver -
 *	An driver is unregistering with the SCSI netlink messages
 *
 * @vendor_id:          The unique identification value for the driver.
 *
 * Marks the matching registration HANDLER_DELETING, waits for in-flight
 * handler calls to drain, then unlinks and frees it.  Logs an error if
 * no registration matches @vendor_id.
 **/
void
scsi_nl_remove_driver(u64 vendor_id)
{
	struct scsi_nl_drvr *driver;
	unsigned long flags;

	spin_lock_irqsave(&scsi_nl_lock, flags);

	/*
	 * Wait for the event handler to finish walking the driver list.
	 * Re-check in a loop: the previous single "if" could proceed while
	 * STATE_EHANDLER_BSY was set again after msleep().
	 */
	while (scsi_nl_state & STATE_EHANDLER_BSY) {
		spin_unlock_irqrestore(&scsi_nl_lock, flags);
		msleep(1);
		spin_lock_irqsave(&scsi_nl_lock, flags);
	}

	list_for_each_entry(driver, &scsi_nl_drivers, next) {
		if (driver->vendor_id == vendor_id) {
			driver->flags |= HANDLER_DELETING;

			/* drain any in-flight message handler calls */
			while (driver->refcnt != 0) {
				spin_unlock_irqrestore(&scsi_nl_lock, flags);
				schedule_timeout_uninterruptible(HZ/4);
				spin_lock_irqsave(&scsi_nl_lock, flags);
			}
			list_del(&driver->next);
			kfree(driver);
			spin_unlock_irqrestore(&scsi_nl_lock, flags);
			return;
		}
	}

	spin_unlock_irqrestore(&scsi_nl_lock, flags);

	printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n",
	       __func__, (unsigned long long)vendor_id);
}
EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
/**
* scsi_netlink_init - Called by SCSI subsystem to initialize
* the SCSI transport netlink interface
*
**/
void
scsi_netlink_init(void)
{
	int error;

	INIT_LIST_HEAD(&scsi_nl_drivers);

	/* watch netlink events (scsi_nl_rcv_event filters on
	 * NETLINK_SCSITRANSPORT) so transports/drivers get notified */
	error = netlink_register_notifier(&scsi_netlink_notifier);
	if (error) {
		printk(KERN_ERR "%s: register of event handler failed - %d\n",
				__func__, error);
		return;
	}

	/* create the kernel-side NETLINK_SCSITRANSPORT socket; received
	 * messages are dispatched through scsi_nl_rcv_msg() */
	scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
				SCSI_NL_GRP_CNT, scsi_nl_rcv_msg, NULL,
				THIS_MODULE);
	if (!scsi_nl_sock) {
		printk(KERN_ERR "%s: register of receive handler failed\n",
				__func__);
		/* roll back the notifier registration */
		netlink_unregister_notifier(&scsi_netlink_notifier);
		return;
	}

	/* Register the entry points for the generic SCSI transport */
	error = scsi_nl_add_transport(SCSI_NL_TRANSPORT,
				scsi_generic_msg_handler, NULL);
	if (error)
		printk(KERN_ERR "%s: register of GENERIC transport handler"
				" failed - %d\n", __func__, error);
	return;
}
/**
* scsi_netlink_exit - Called by SCSI subsystem to disable the SCSI transport netlink interface
*
**/
/*
 * Tear down the SCSI transport netlink interface, undoing
 * scsi_netlink_init() in reverse order.
 */
void
scsi_netlink_exit(void)
{
	scsi_nl_remove_transport(SCSI_NL_TRANSPORT);

	if (scsi_nl_sock) {
		netlink_kernel_release(scsi_nl_sock);
		netlink_unregister_notifier(&scsi_netlink_notifier);
	}
}
/*
* Exported Interfaces
*/
/**
* scsi_nl_send_transport_msg -
* Generic function to send a single message from a SCSI transport to
* a single process
*
* @pid: receiving pid
* @hdr: message payload
*
**/
void
scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	const char *fn;	/* name of the failing step, for the log message */
	char *datab;
	u32 len, skblen;
	int err;

	if (!scsi_nl_sock) {
		err = -ENOENT;
		fn = "netlink socket";
		goto msg_fail;
	}

	len = NLMSG_SPACE(hdr->msglen);
	/* NOTE(review): applying NLMSG_SPACE() a second time over-sizes the
	 * skb; harmless (only wastes a little memory) but looks unintended */
	skblen = NLMSG_SPACE(len);

	skb = alloc_skb(skblen, GFP_KERNEL);
	if (!skb) {
		err = -ENOBUFS;
		fn = "alloc_skb";
		goto msg_fail;
	}

	/* payload length excludes the netlink header itself */
	nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0);
	if (!nlh) {
		err = -ENOBUFS;
		fn = "nlmsg_put";
		goto msg_fail_skb;
	}
	datab = NLMSG_DATA(nlh);
	memcpy(datab, hdr, hdr->msglen);

	err = nlmsg_unicast(scsi_nl_sock, skb, pid);
	if (err < 0) {
		fn = "nlmsg_unicast";
		/* nlmsg_unicast already kfree_skb'd */
		goto msg_fail;
	}

	return;

msg_fail_skb:
	kfree_skb(skb);
msg_fail:
	printk(KERN_WARNING
		"%s: Dropped Message : pid %d Transport %d, msgtype x%x, "
		"msglen %d: %s : err %d\n",
	       __func__, pid, hdr->transport, hdr->msgtype, hdr->msglen,
	       fn, err);
	return;
}
/**
* scsi_nl_send_vendor_msg - called to send a shost vendor unique message
* to a specific process id.
*
* @pid: process id of the receiver
* @host_no: host # sending the message
* @vendor_id: unique identifier for the driver's vendor
* @data_len: amount, in bytes, of vendor unique payload data
* @data_buf: pointer to vendor unique data buffer
*
* Returns:
* 0 on successful return
* otherwise, failing error code
*
* Notes:
* This routine assumes no locks are held on entry.
*/
int
scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
			 char *data_buf, u32 data_len)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct scsi_nl_host_vendor_msg *msg;
	u32 len, skblen;
	int err;

	if (!scsi_nl_sock) {
		err = -ENOENT;
		goto send_vendor_fail;
	}

	/* fixed header plus vendor payload, padded per SCSI_NL_MSGALIGN */
	len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len);
	skblen = NLMSG_SPACE(len);

	skb = alloc_skb(skblen, GFP_KERNEL);
	if (!skb) {
		err = -ENOBUFS;
		goto send_vendor_fail;
	}

	/* NOTE(review): the payload length passed here is skblen -
	 * sizeof(*nlh) rather than len, so the netlink message claims the
	 * full (aligned) buffer size — verify receivers tolerate this */
	nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
				skblen - sizeof(*nlh), 0);
	if (!nlh) {
		err = -ENOBUFS;
		goto send_vendor_fail_skb;
	}
	msg = NLMSG_DATA(nlh);

	INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT,
				SCSI_NL_SHOST_VENDOR, len);
	msg->vendor_id = vendor_id;
	msg->host_no = host_no;
	msg->vmsg_datalen = data_len;	/* bytes */
	/* vendor payload immediately follows the fixed header */
	memcpy(&msg[1], data_buf, data_len);

	err = nlmsg_unicast(scsi_nl_sock, skb, pid);
	if (err)
		/* nlmsg_multicast already kfree_skb'd */
		goto send_vendor_fail;

	return 0;

send_vendor_fail_skb:
	kfree_skb(skb);
send_vendor_fail:
	printk(KERN_WARNING
		"%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n",
	       __func__, host_no, err);
	return err;
}
EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
| gpl-2.0 |
grondinm/android_kernel_motorola_msm8974 | drivers/tty/synclink_gt.c | 4929 | 135001 | /*
* Device driver for Microgate SyncLink GT serial adapters.
*
* written by Paul Fulghum for Microgate Corporation
* paulkf@microgate.com
*
* Microgate and SyncLink are trademarks of Microgate Corporation
*
* This code is released under the GNU General Public License (GPL)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* DEBUG OUTPUT DEFINITIONS
*
* uncomment lines below to enable specific types of debug output
*
* DBGINFO information - most verbose output
* DBGERR serious errors
* DBGBH bottom half service routine debugging
* DBGISR interrupt service routine debugging
* DBGDATA output receive and transmit data
* DBGTBUF output transmit DMA buffers and registers
* DBGRBUF output receive DMA buffers and registers
*/
#define DBGINFO(fmt) if (debug_level >= DEBUG_LEVEL_INFO) printk fmt
#define DBGERR(fmt) if (debug_level >= DEBUG_LEVEL_ERROR) printk fmt
#define DBGBH(fmt) if (debug_level >= DEBUG_LEVEL_BH) printk fmt
#define DBGISR(fmt) if (debug_level >= DEBUG_LEVEL_ISR) printk fmt
#define DBGDATA(info, buf, size, label) if (debug_level >= DEBUG_LEVEL_DATA) trace_block((info), (buf), (size), (label))
/*#define DBGTBUF(info) dump_tbufs(info)*/
/*#define DBGRBUF(info) dump_rbufs(info)*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ioctl.h>
#include <linux/termios.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/hdlc.h>
#include <linux/synclink.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
#else
#define SYNCLINK_GENERIC_HDLC 0
#endif
/*
* module identification
*/
static char *driver_name = "SyncLink GT";
static char *tty_driver_name = "synclink_gt";
static char *tty_dev_prefix = "ttySLG";
MODULE_LICENSE("GPL");
#define MGSL_MAGIC 0x5401
#define MAX_DEVICES 32
static struct pci_device_id pci_table[] = {
{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT2_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT4_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_MICROGATE, SYNCLINK_AC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{0,}, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, pci_table);
static int init_one(struct pci_dev *dev,const struct pci_device_id *ent);
static void remove_one(struct pci_dev *dev);
static struct pci_driver pci_driver = {
.name = "synclink_gt",
.id_table = pci_table,
.probe = init_one,
.remove = __devexit_p(remove_one),
};
static bool pci_registered;
/*
* module configuration and status
*/
static struct slgt_info *slgt_device_list;
static int slgt_device_count;
static int ttymajor;
static int debug_level;
static int maxframe[MAX_DEVICES];
module_param(ttymajor, int, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);
MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned");
MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail");
MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)");
/*
* tty support and callbacks
*/
static struct tty_driver *serial_driver;
static int open(struct tty_struct *tty, struct file * filp);
static void close(struct tty_struct *tty, struct file * filp);
static void hangup(struct tty_struct *tty);
static void set_termios(struct tty_struct *tty, struct ktermios *old_termios);
static int write(struct tty_struct *tty, const unsigned char *buf, int count);
static int put_char(struct tty_struct *tty, unsigned char ch);
static void send_xchar(struct tty_struct *tty, char ch);
static void wait_until_sent(struct tty_struct *tty, int timeout);
static int write_room(struct tty_struct *tty);
static void flush_chars(struct tty_struct *tty);
static void flush_buffer(struct tty_struct *tty);
static void tx_hold(struct tty_struct *tty);
static void tx_release(struct tty_struct *tty);
static int ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
static int chars_in_buffer(struct tty_struct *tty);
static void throttle(struct tty_struct * tty);
static void unthrottle(struct tty_struct * tty);
static int set_break(struct tty_struct *tty, int break_state);
/*
* generic HDLC support and callbacks
*/
#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
static void hdlcdev_tx_done(struct slgt_info *info);
static void hdlcdev_rx(struct slgt_info *info, char *buf, int size);
static int hdlcdev_init(struct slgt_info *info);
static void hdlcdev_exit(struct slgt_info *info);
#endif
/*
* device specific structures, macros and functions
*/
#define SLGT_MAX_PORTS 4
#define SLGT_REG_SIZE 256
/*
* conditional wait facility
*/
/*
 * One waiter in the conditional wait facility: a singly-linked list
 * node with its own wait queue head/entry and the value being waited
 * on (used via slgt_info.gpio_wait_q — presumably GPIO state; confirm
 * against the add/remove_cond_wait call sites).
 */
struct cond_wait {
	struct cond_wait *next;	/* next waiter in the list */
	wait_queue_head_t q;	/* sleep/wake queue for this waiter */
	wait_queue_t wait;	/* this waiter's queue entry */
	unsigned int data;	/* value the waiter is interested in */
};
static void init_cond_wait(struct cond_wait *w, unsigned int data);
static void add_cond_wait(struct cond_wait **head, struct cond_wait *w);
static void remove_cond_wait(struct cond_wait **head, struct cond_wait *w);
static void flush_cond_wait(struct cond_wait **head);
/*
* DMA buffer descriptor and access macros
*/
/*
 * DMA buffer descriptor.  The first four little-endian fields are the
 * layout the adapter hardware reads; the rest is driver-private
 * bookkeeping (see the set_desc_*/desc_* accessor macros below).
 */
struct slgt_desc
{
	__le16 count;	/* data byte count */
	__le16 status;	/* completion/EOF/error bits (see desc_* macros) */
	__le32 pbuf;  /* physical address of data buffer */
	__le32 next;  /* physical address of next descriptor */

	/* driver book keeping */
	char *buf;          /* virtual  address of data buffer */
	unsigned int pdesc; /* physical address of this descriptor */
	dma_addr_t buf_dma_addr;	/* DMA handle for buf */
	unsigned short buf_count;
};
#define set_desc_buffer(a,b) (a).pbuf = cpu_to_le32((unsigned int)(b))
#define set_desc_next(a,b) (a).next = cpu_to_le32((unsigned int)(b))
#define set_desc_count(a,b)(a).count = cpu_to_le16((unsigned short)(b))
#define set_desc_eof(a,b) (a).status = cpu_to_le16((b) ? (le16_to_cpu((a).status) | BIT0) : (le16_to_cpu((a).status) & ~BIT0))
#define set_desc_status(a, b) (a).status = cpu_to_le16((unsigned short)(b))
#define desc_count(a) (le16_to_cpu((a).count))
#define desc_status(a) (le16_to_cpu((a).status))
#define desc_complete(a) (le16_to_cpu((a).status) & BIT15)
#define desc_eof(a) (le16_to_cpu((a).status) & BIT2)
#define desc_crc_error(a) (le16_to_cpu((a).status) & BIT1)
#define desc_abort(a) (le16_to_cpu((a).status) & BIT0)
#define desc_residue(a) ((le16_to_cpu((a).status) & 0x38) >> 3)
/* Counters for serial control-signal transitions (one up/down pair per
 * modem input: RI, DSR, DCD, CTS). */
struct _input_signal_events {
	int ri_up;
	int ri_down;
	int dsr_up;
	int dsr_down;
	int dcd_up;
	int dcd_down;
	int cts_up;
	int cts_down;
};
/*
* device instance data structure
*/
struct slgt_info {
	void *if_ptr;		/* General purpose pointer (used by SPPP) */
	struct tty_port port;
	struct slgt_info *next_device;	/* device list link */
	int magic;		/* presumably set to MGSL_MAGIC for validation — confirm at init site */
	char device_name[25];	/* e.g. "ttySLG0" (built from tty_dev_prefix) — confirm */
	struct pci_dev *pdev;
	int port_count;  /* count of ports on adapter */
	int adapter_num; /* adapter instance number */
	int port_num;    /* port instance number */
	/* array of pointers to port contexts on this adapter */
	struct slgt_info *port_array[SLGT_MAX_PORTS];
	int line;		/* tty line instance number */
	struct mgsl_icount icount;
	int timeout;
	int x_char;		/* xon/xoff character */
	unsigned int read_status_mask;
	unsigned int ignore_status_mask;
	wait_queue_head_t status_event_wait_q;
	wait_queue_head_t event_wait_q;
	struct timer_list tx_timer;
	struct timer_list rx_timer;
	unsigned int gpio_present;
	struct cond_wait *gpio_wait_q;	/* waiters for GPIO state changes */
	spinlock_t lock;	/* spinlock for synchronizing with ISR */
	struct work_struct task;	/* bottom-half work item */
	u32 pending_bh;		/* BH_RECEIVE/BH_TRANSMIT/BH_STATUS bits */
	bool bh_requested;
	bool bh_running;
	int isr_overflow;
	bool irq_requested;	/* true if IRQ requested */
	bool irq_occurred;	/* for diagnostics use */

	/* device configuration */

	unsigned int bus_type;
	unsigned int irq_level;
	unsigned long irq_flags;

	unsigned char __iomem * reg_addr;  /* memory mapped registers address */
	u32 phys_reg_addr;
	bool reg_addr_requested;

	MGSL_PARAMS params;       /* communications parameters */
	u32 idle_mode;
	u32 max_frame_size;       /* as set by device config */

	unsigned int rbuf_fill_level;
	unsigned int rx_pio;
	unsigned int if_mode;
	unsigned int base_clock;
	unsigned int xsync;
	unsigned int xctrl;

	/* device status */

	bool rx_enabled;
	bool rx_restart;

	bool tx_enabled;
	bool tx_active;

	unsigned char signals;    /* serial signal states */
	int init_error;  /* initialization error */

	unsigned char *tx_buf;
	int tx_count;

	char flag_buf[MAX_ASYNC_BUFFER_SIZE];
	char char_buf[MAX_ASYNC_BUFFER_SIZE];
	bool drop_rts_on_tx_done;
	struct _input_signal_events input_signal_events;

	int dcd_chkcount;	/* check counts to prevent */
	int cts_chkcount;	/* too many IRQs if a signal */
	int dsr_chkcount;	/* is floating */
	int ri_chkcount;

	char *bufs;		/* virtual address of DMA buffer lists */
	dma_addr_t bufs_dma_addr; /* physical address of buffer descriptors */

	/* receive descriptor ring (struct slgt_desc entries) */
	unsigned int rbuf_count;
	struct slgt_desc *rbufs;
	unsigned int rbuf_current;
	unsigned int rbuf_index;
	unsigned int rbuf_fill_index;
	unsigned short rbuf_fill_count;

	/* transmit descriptor ring */
	unsigned int tbuf_count;
	struct slgt_desc *tbufs;
	unsigned int tbuf_current;
	unsigned int tbuf_start;

	unsigned char *tmp_rbuf;
	unsigned int tmp_rbuf_count;

	/* SPPP/Cisco HDLC device parts */

	int netcount;
	spinlock_t netlock;
#if SYNCLINK_GENERIC_HDLC
	struct net_device *netdev;
#endif
};
/*
 * Factory-default communication parameters, copied into each port's
 * params at device init.
 */
static MGSL_PARAMS default_params = {
	.mode = MGSL_MODE_HDLC,
	.loopback = 0,
	.flags = HDLC_FLAG_UNDERRUN_ABORT15,
	.encoding = HDLC_ENCODING_NRZI_SPACE,
	.clock_speed = 0,
	.addr_filter = 0xff,	/* accept all addresses */
	.crc_type = HDLC_CRC_16_CCITT,
	.preamble_length = HDLC_PREAMBLE_LENGTH_8BITS,
	.preamble = HDLC_PREAMBLE_PATTERN_NONE,
	.data_rate = 9600,
	.data_bits = 8,
	.stop_bits = 1,
	.parity = ASYNC_PARITY_NONE
};
/* bottom-half (deferred work) event flags for pending_bh */
#define BH_RECEIVE  1
#define BH_TRANSMIT 2
#define BH_STATUS   4
/* shut off a floating signal's IRQ after this many transitions */
#define IO_PIN_SHUTDOWN_LIMIT 100
#define DMABUFSIZE 256
#define DESC_LIST_SIZE 4096
/* async receive error status bits */
#define MASK_PARITY  BIT1
#define MASK_FRAMING BIT0
#define MASK_BREAK   BIT14
#define MASK_OVERRUN BIT4
/* adapter-global register offsets */
#define GSR   0x00 /* global status */
#define JCR   0x04 /* JTAG control */
#define IODR  0x08 /* GPIO direction */
#define IOER  0x0c /* GPIO interrupt enable */
#define IOVR  0x10 /* GPIO value */
#define IOSR  0x14 /* GPIO interrupt status */
/* per-port register offsets */
#define TDR   0x80 /* tx data */
#define RDR   0x80 /* rx data */
#define TCR   0x82 /* tx control */
#define TIR   0x84 /* tx idle */
#define TPR   0x85 /* tx preamble */
#define RCR   0x86 /* rx control */
#define VCR   0x88 /* V.24 control */
#define CCR   0x89 /* clock control */
#define BDR   0x8a /* baud divisor */
#define SCR   0x8c /* serial control */
#define SSR   0x8e /* serial status */
#define RDCSR 0x90 /* rx DMA control/status */
#define TDCSR 0x94 /* tx DMA control/status */
#define RDDAR 0x98 /* rx DMA descriptor address */
#define TDDAR 0x9c /* tx DMA descriptor address */
#define XSR   0x40 /* extended sync pattern */
#define XCR   0x44 /* extended control */
/* serial status (SSR) bits */
#define RXIDLE      BIT14
#define RXBREAK     BIT14
/* serial control (SCR) interrupt enable bits */
#define IRQ_TXDATA  BIT13
#define IRQ_TXIDLE  BIT12
#define IRQ_TXUNDER BIT11 /* HDLC */
#define IRQ_RXDATA  BIT10
#define IRQ_RXIDLE  BIT9  /* HDLC */
#define IRQ_RXBREAK BIT9  /* async */
#define IRQ_RXOVER  BIT8
#define IRQ_DSR     BIT7
#define IRQ_CTS     BIT6
#define IRQ_DCD     BIT5
#define IRQ_RI      BIT4
#define IRQ_ALL     0x3ff0
#define IRQ_MASTER  BIT0
/* set/clear interrupt enable bits in SCR (caller holds info->lock) */
#define slgt_irq_on(info, mask) \
	wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) | (mask)))
#define slgt_irq_off(info, mask) \
	wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) & ~(mask)))
/* memory-mapped register access primitives */
static __u8 rd_reg8(struct slgt_info *info, unsigned int addr);
static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value);
static __u16 rd_reg16(struct slgt_info *info, unsigned int addr);
static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value);
static __u32 rd_reg32(struct slgt_info *info, unsigned int addr);
static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value);
/* device setup/teardown and configuration */
static void msc_set_vcr(struct slgt_info *info);
static int  startup(struct slgt_info *info);
static int  block_til_ready(struct tty_struct *tty, struct file * filp,struct slgt_info *info);
static void shutdown(struct slgt_info *info);
static void program_hw(struct slgt_info *info);
static void change_params(struct slgt_info *info);
/* adapter diagnostics */
static int  register_test(struct slgt_info *info);
static int  irq_test(struct slgt_info *info);
static int  loopback_test(struct slgt_info *info);
static int  adapter_test(struct slgt_info *info);
/* serial mode and receive/transmit control */
static void reset_adapter(struct slgt_info *info);
static void reset_port(struct slgt_info *info);
static void async_mode(struct slgt_info *info);
static void sync_mode(struct slgt_info *info);
static void rx_stop(struct slgt_info *info);
static void rx_start(struct slgt_info *info);
static void reset_rbufs(struct slgt_info *info);
static void free_rbufs(struct slgt_info *info, unsigned int first, unsigned int last);
static void rdma_reset(struct slgt_info *info);
static bool rx_get_frame(struct slgt_info *info);
static bool rx_get_buf(struct slgt_info *info);
static void tx_start(struct slgt_info *info);
static void tx_stop(struct slgt_info *info);
static void tx_set_idle(struct slgt_info *info);
static unsigned int free_tbuf_count(struct slgt_info *info);
static unsigned int tbuf_bytes(struct slgt_info *info);
static void reset_tbufs(struct slgt_info *info);
static void tdma_reset(struct slgt_info *info);
static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
/* modem-control signals and line interface */
static void get_signals(struct slgt_info *info);
static void set_signals(struct slgt_info *info);
static void enable_loopback(struct slgt_info *info);
static void set_rate(struct slgt_info *info, u32 data_rate);
/* bottom half and interrupt service */
static int  bh_action(struct slgt_info *info);
static void bh_handler(struct work_struct *work);
static void bh_transmit(struct slgt_info *info);
static void isr_serial(struct slgt_info *info);
static void isr_rdma(struct slgt_info *info);
static void isr_txeom(struct slgt_info *info, unsigned short status);
static void isr_tdma(struct slgt_info *info);
/* DMA buffer management */
static int  alloc_dma_bufs(struct slgt_info *info);
static void free_dma_bufs(struct slgt_info *info);
static int  alloc_desc(struct slgt_info *info);
static void free_desc(struct slgt_info *info);
static int  alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);
static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);
static int  alloc_tmp_rbuf(struct slgt_info *info);
static void free_tmp_rbuf(struct slgt_info *info);
/* timer callbacks */
static void tx_timeout(unsigned long context);
static void rx_timeout(unsigned long context);
/*
 * ioctl handlers
 */
static int  get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount);
static int  get_params(struct slgt_info *info, MGSL_PARAMS __user *params);
static int  set_params(struct slgt_info *info, MGSL_PARAMS __user *params);
static int  get_txidle(struct slgt_info *info, int __user *idle_mode);
static int  set_txidle(struct slgt_info *info, int idle_mode);
static int  tx_enable(struct slgt_info *info, int enable);
static int  tx_abort(struct slgt_info *info);
static int  rx_enable(struct slgt_info *info, int enable);
static int  modem_input_wait(struct slgt_info *info,int arg);
static int  wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr);
static int  tiocmget(struct tty_struct *tty);
static int  tiocmset(struct tty_struct *tty,
				unsigned int set, unsigned int clear);
static int set_break(struct tty_struct *tty, int break_state);
static int  get_interface(struct slgt_info *info, int __user *if_mode);
static int  set_interface(struct slgt_info *info, int if_mode);
static int  set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
static int  get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
static int  wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
static int  get_xsync(struct slgt_info *info, int __user *if_mode);
static int  set_xsync(struct slgt_info *info, int if_mode);
static int  get_xctrl(struct slgt_info *info, int __user *if_mode);
static int  set_xctrl(struct slgt_info *info, int if_mode);
/*
 * driver functions
 */
static void add_device(struct slgt_info *info);
static void device_init(int adapter_num, struct pci_dev *pdev);
static int  claim_resources(struct slgt_info *info);
static void release_resources(struct slgt_info *info);
/*
 * DEBUG OUTPUT CODE
 *
 * Each DBG* macro is expected to be defined by the build when the
 * corresponding debug output is wanted; these fallbacks expand to
 * nothing so debug calls compile away in production builds.
 */
#ifndef DBGINFO
#define DBGINFO(fmt)
#endif
#ifndef DBGERR
#define DBGERR(fmt)
#endif
#ifndef DBGBH
#define DBGBH(fmt)
#endif
#ifndef DBGISR
#define DBGISR(fmt)
#endif
#ifdef DBGDATA
/*
 * Hex + printable-ASCII dump of a data buffer, up to 16 bytes per line
 * (compiled in only when DBGDATA is defined).
 */
static void trace_block(struct slgt_info *info, const char *data, int count, const char *label)
{
	int i;
	int linecount;
	printk("%s %s data:\n",info->device_name, label);
	while(count) {
		linecount = (count > 16) ? 16 : count;
		/* hex column */
		for(i=0; i < linecount; i++)
			printk("%02X ",(unsigned char)data[i]);
		/* pad hex column out to a fixed width for short last lines */
		for(;i<17;i++)
			printk(" ");
		/* printable characters (octal 040..0176); '.' for the rest */
		for(i=0;i<linecount;i++) {
			if (data[i]>=040 && data[i]<=0176)
				printk("%c",data[i]);
			else
				printk(".");
		}
		printk("\n");
		data  += linecount;
		count -= linecount;
	}
}
#else
#define DBGDATA(info, buf, size, label)
#endif
#ifdef DBGTBUF
/*
 * Print count/status of every transmit DMA descriptor
 * (compiled in only when DBGTBUF is defined).
 */
static void dump_tbufs(struct slgt_info *info)
{
	int i;

	printk("tbuf_current=%d\n", info->tbuf_current);
	for (i = 0; i < info->tbuf_count; i++)
		printk("%d: count=%04X status=%04X\n", i,
		       le16_to_cpu(info->tbufs[i].count),
		       le16_to_cpu(info->tbufs[i].status));
}
#else
#define DBGTBUF(info)
#endif
#ifdef DBGRBUF
/*
 * Print count/status of every receive DMA descriptor
 * (compiled in only when DBGRBUF is defined).
 */
static void dump_rbufs(struct slgt_info *info)
{
	int i;

	printk("rbuf_current=%d\n", info->rbuf_current);
	for (i = 0; i < info->rbuf_count; i++)
		printk("%d: count=%04X status=%04X\n", i,
		       le16_to_cpu(info->rbufs[i].count),
		       le16_to_cpu(info->rbufs[i].status));
}
#else
#define DBGRBUF(info)
#endif
/*
 * Validate a device context pointer before use in a tty callback.
 * With SANITY_CHECK builds the MGSL_MAGIC field is also verified and
 * failures are logged; otherwise only a NULL check is done.
 * Returns 0 if info looks valid, 1 otherwise.
 */
static inline int sanity_check(struct slgt_info *info, char *devname, const char *name)
{
#ifdef SANITY_CHECK
	if (!info) {
		printk("null struct slgt_info for (%s) in %s\n", devname, name);
		return 1;
	}
	if (info->magic != MGSL_MAGIC) {
		printk("bad magic number struct slgt_info (%s) in %s\n", devname, name);
		return 1;
	}
#else
	if (!info)
		return 1;
#endif
	return 0;
}
/**
* line discipline callback wrappers
*
* The wrappers maintain line discipline references
* while calling into the line discipline.
*
* ldisc_receive_buf - pass receive data to line discipline
*/
static void ldisc_receive_buf(struct tty_struct *tty,
			const __u8 *data, char *flags, int count)
{
	struct tty_ldisc *ld;

	if (!tty)
		return;

	/* hold a line discipline reference across the callback */
	ld = tty_ldisc_ref(tty);
	if (!ld)
		return;

	if (ld->ops->receive_buf)
		ld->ops->receive_buf(tty, data, flags, count);
	tty_ldisc_deref(ld);
}
/* tty callbacks */
/*
 * tty open() callback.
 *
 * Locate the device instance for tty->index, refuse the open while the
 * port is closing or claimed by the network (HDLC) interface, initialize
 * the hardware on first open, then block until the port is ready per
 * the carrier/flow rules in block_til_ready().
 *
 * Returns 0 on success or a negative errno.
 */
static int open(struct tty_struct *tty, struct file *filp)
{
	struct slgt_info *info;
	int retval, line;
	unsigned long flags;

	line = tty->index;
	if (line >= slgt_device_count) {
		DBGERR(("%s: open with invalid line #%d.\n", driver_name, line));
		return -ENODEV;
	}

	info = slgt_device_list;
	while(info && info->line != line)
		info = info->next_device;
	if (sanity_check(info, tty->name, "open"))
		return -ENODEV;
	if (info->init_error) {
		DBGERR(("%s init error=%d\n", info->device_name, info->init_error));
		return -ENODEV;
	}

	tty->driver_data = info;
	info->port.tty = tty;

	DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));

	/* If port is closing, signal caller to try again */
	if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
		/*
		 * FIX: interruptible_sleep_on() is racy (the wakeup can fire
		 * between the flags test and the sleep, and is then lost)
		 * and was removed from mainline; wait on the condition
		 * instead.
		 */
		if (info->port.flags & ASYNC_CLOSING)
			wait_event_interruptible(info->port.close_wait,
				!(info->port.flags & ASYNC_CLOSING));
		retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
			-EAGAIN : -ERESTARTSYS);
		goto cleanup;
	}

	mutex_lock(&info->port.mutex);
	info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;

	/* refuse tty open while network interface is using the port */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->netcount) {
		retval = -EBUSY;
		spin_unlock_irqrestore(&info->netlock, flags);
		mutex_unlock(&info->port.mutex);
		goto cleanup;
	}
	info->port.count++;
	spin_unlock_irqrestore(&info->netlock, flags);

	if (info->port.count == 1) {
		/* 1st open on this device, init hardware */
		retval = startup(info);
		if (retval < 0) {
			mutex_unlock(&info->port.mutex);
			goto cleanup;
		}
	}
	mutex_unlock(&info->port.mutex);

	retval = block_til_ready(tty, filp, info);
	if (retval) {
		DBGINFO(("%s block_til_ready rc=%d\n", info->device_name, retval));
		goto cleanup;
	}

	retval = 0;

cleanup:
	if (retval) {
		if (tty->count == 1)
			info->port.tty = NULL; /* tty layer will release tty struct */
		if(info->port.count)
			info->port.count--;
	}

	DBGINFO(("%s open rc=%d\n", info->device_name, retval));
	return retval;
}
/*
 * tty close() callback.
 *
 * tty_port_close_start() handles reference counting and returns 0 when
 * this is not the final close (or the port was hung up) — then no
 * hardware shutdown is done.  On final close: drain pending transmit
 * data, flush buffers, shut the hardware down, then let the port layer
 * finish (wake openers, handle close delay).
 */
static void close(struct tty_struct *tty, struct file *filp)
{
	struct slgt_info *info = tty->driver_data;

	if (sanity_check(info, tty->name, "close"))
		return;
	DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));

	if (tty_port_close_start(&info->port, tty, filp) == 0)
		goto cleanup;

	mutex_lock(&info->port.mutex);
	if (info->port.flags & ASYNC_INITIALIZED)
		/* let transmit data drain before stopping the hardware */
		wait_until_sent(tty, info->timeout);
	flush_buffer(tty);
	tty_ldisc_flush(tty);

	shutdown(info);
	mutex_unlock(&info->port.mutex);

	tty_port_close_end(&info->port, tty);
	info->port.tty = NULL;
cleanup:
	DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
}
/*
 * tty hangup() callback: forcibly shut the port down and reset the
 * open count/flags so blocked or future opens start clean, then wake
 * anyone sleeping in block_til_ready().
 */
static void hangup(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "hangup"))
		return;
	DBGINFO(("%s hangup\n", info->device_name));

	flush_buffer(tty);

	mutex_lock(&info->port.mutex);
	shutdown(info);

	spin_lock_irqsave(&info->port.lock, flags);
	info->port.count = 0;
	info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
	info->port.tty = NULL;
	spin_unlock_irqrestore(&info->port.lock, flags);
	mutex_unlock(&info->port.mutex);

	wake_up_interruptible(&info->port.open_wait);
}
/*
 * tty set_termios() callback: reprogram the hardware for the new
 * settings, then handle the DTR/RTS side effects of baud transitions
 * to and from B0 and of turning off hardware flow control.
 */
static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	DBGINFO(("%s set_termios\n", tty->driver->name));

	change_params(info);

	/* Handle transition to B0 status: drop DTR and RTS */
	if (old_termios->c_cflag & CBAUD &&
	    !(tty->termios->c_cflag & CBAUD)) {
		info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
		spin_lock_irqsave(&info->lock,flags);
		set_signals(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}

	/* Handle transition away from B0 status: raise DTR, and RTS
	 * unless hardware flow control is on and we are throttled */
	if (!(old_termios->c_cflag & CBAUD) &&
	    tty->termios->c_cflag & CBAUD) {
		info->signals |= SerialSignal_DTR;
		if (!(tty->termios->c_cflag & CRTSCTS) ||
		    !test_bit(TTY_THROTTLED, &tty->flags)) {
			info->signals |= SerialSignal_RTS;
		}
		spin_lock_irqsave(&info->lock,flags);
		set_signals(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}

	/* Handle turning off CRTSCTS: release a CTS-stopped transmitter */
	if (old_termios->c_cflag & CRTSCTS &&
	    !(tty->termios->c_cflag & CRTSCTS)) {
		tty->hw_stopped = 0;
		tx_release(tty);
	}
}
/*
 * (Re)arm the transmit timeout timer from the amount of queued data.
 * Uses a worst-case rate of 1200 bps (~7 ms per byte of buffered data,
 * see tbuf_bytes()) plus one second of margin for the 128-byte FIFO.
 * Only applies in HDLC mode.
 */
static void update_tx_timer(struct slgt_info *info)
{
	int timeout;

	if (info->params.mode != MGSL_MODE_HDLC)
		return;

	timeout = (tbuf_bytes(info) * 7) + 1000;
	mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout));
}
/*
 * tty write() callback: load a frame into the transmit DMA buffers.
 * Any bytes previously accumulated via put_char() are flushed first.
 * Returns the number of bytes accepted (count or 0) or -EIO.
 */
static int write(struct tty_struct *tty,
		 const unsigned char *buf, int count)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;
	int ret = 0;

	if (sanity_check(info, tty->name, "write"))
		return -EIO;

	DBGINFO(("%s write count=%d\n", info->device_name, count));

	if (!info->tx_buf || (count > info->max_frame_size))
		return -EIO;
	if (!count || tty->stopped || tty->hw_stopped)
		return 0;

	spin_lock_irqsave(&info->lock, flags);

	/* send accumulated data from send_char() */
	if (info->tx_count) {
		if (!tx_load(info, info->tx_buf, info->tx_count))
			goto cleanup;
		info->tx_count = 0;
	}

	if (tx_load(info, buf, count))
		ret = count;

cleanup:
	spin_unlock_irqrestore(&info->lock, flags);
	DBGINFO(("%s write rc=%d\n", info->device_name, ret));
	return ret;
}
/*
 * tty put_char() callback: buffer a single character for later
 * transmission (pushed out by flush_chars() or the next write()).
 * Returns 1 if the character was buffered, 0 otherwise.
 */
static int put_char(struct tty_struct *tty, unsigned char ch)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;
	int buffered = 0;

	if (sanity_check(info, tty->name, "put_char"))
		return 0;
	DBGINFO(("%s put_char(%d)\n", info->device_name, ch));
	if (!info->tx_buf)
		return 0;

	spin_lock_irqsave(&info->lock, flags);
	if (info->tx_count < info->max_frame_size) {
		info->tx_buf[info->tx_count++] = ch;
		buffered = 1;
	}
	spin_unlock_irqrestore(&info->lock, flags);

	return buffered;
}
/*
 * tty send_xchar() callback: queue a flow-control character (XON/XOFF)
 * for priority transmission; a non-zero character also kicks the
 * transmitter if it is currently disabled.
 */
static void send_xchar(struct tty_struct *tty, char ch)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "send_xchar"))
		return;
	DBGINFO(("%s send_xchar(%d)\n", info->device_name, ch));

	info->x_char = ch;
	if (!ch)
		return;

	spin_lock_irqsave(&info->lock, flags);
	if (!info->tx_enabled)
		tx_start(info);
	spin_unlock_irqrestore(&info->lock, flags);
}
/*
 * tty wait_until_sent() callback: poll until the transmitter goes idle
 * or the timeout (jiffies; 0 = no limit) expires or a signal arrives.
 */
static void wait_until_sent(struct tty_struct *tty, int timeout)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long orig_jiffies, char_time;

	if (!info )
		return;
	if (sanity_check(info, tty->name, "wait_until_sent"))
		return;
	DBGINFO(("%s wait_until_sent entry\n", info->device_name));
	if (!(info->port.flags & ASYNC_INITIALIZED))
		goto exit;

	orig_jiffies = jiffies;

	/* Set check interval to 1/5 of estimated time to
	 * send a character, and make it at least 1. The check
	 * interval should also be less than the timeout.
	 * Note: use tight timings here to satisfy the NIST-PCTS.
	 */
	if (info->params.data_rate) {
		/* info->timeout estimates full-buffer drain time; /32 per char
		 * (see change_params), /5 for the polling interval */
		char_time = info->timeout/(32 * 5);
		if (!char_time)
			char_time++;
	} else
		char_time = 1;

	if (timeout)
		char_time = min_t(unsigned long, char_time, timeout);

	/* poll in char_time steps until transmitter idle, timeout or signal */
	while (info->tx_active) {
		msleep_interruptible(jiffies_to_msecs(char_time));
		if (signal_pending(current))
			break;
		if (timeout && time_after(jiffies, orig_jiffies + timeout))
			break;
	}
exit:
	DBGINFO(("%s wait_until_sent exit\n", info->device_name));
}
/*
 * tty write_room() callback.  Transmission is frame-at-a-time, so
 * report space for a full frame only when the transmitter is idle,
 * otherwise zero.
 */
static int write_room(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	int space;

	if (sanity_check(info, tty->name, "write_room"))
		return 0;

	space = info->tx_active ? 0 : HDLC_MAX_FRAME_SIZE;
	DBGINFO(("%s write_room=%d\n", info->device_name, space));
	return space;
}
/*
 * tty flush_chars() callback: push bytes accumulated by put_char()
 * out to the transmit DMA buffers.  Nothing is sent while the tty is
 * stopped/hw_stopped or when no staging buffer exists.
 */
static void flush_chars(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "flush_chars"))
		return;
	DBGINFO(("%s flush_chars entry tx_count=%d\n", info->device_name, info->tx_count));

	if (info->tx_count <= 0 || tty->stopped ||
	    tty->hw_stopped || !info->tx_buf)
		return;

	DBGINFO(("%s flush_chars start transmit\n", info->device_name));

	spin_lock_irqsave(&info->lock, flags);
	if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
		info->tx_count = 0;	/* staged bytes handed to DMA */
	spin_unlock_irqrestore(&info->lock, flags);
}
/*
 * tty flush_buffer() callback: discard any bytes staged by put_char()
 * and wake writers waiting for buffer space.
 */
static void flush_buffer(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "flush_buffer"))
		return;
	DBGINFO(("%s flush_buffer\n", info->device_name));

	spin_lock_irqsave(&info->lock, flags);
	info->tx_count = 0;
	spin_unlock_irqrestore(&info->lock, flags);

	tty_wakeup(tty);
}
/*
* throttle (stop) transmitter
*/
/*
 * throttle (stop) transmitter — async mode only; in HDLC mode the
 * transmitter is left running so frames complete.
 */
static void tx_hold(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "tx_hold"))
		return;
	DBGINFO(("%s tx_hold\n", info->device_name));

	spin_lock_irqsave(&info->lock, flags);
	if (info->params.mode == MGSL_MODE_ASYNC && info->tx_enabled)
		tx_stop(info);
	spin_unlock_irqrestore(&info->lock, flags);
}
/*
* release (start) transmitter
*/
/*
 * release (start) transmitter: push any staged put_char() bytes out
 * to the transmit DMA buffers.
 */
static void tx_release(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "tx_release"))
		return;
	DBGINFO(("%s tx_release\n", info->device_name));

	spin_lock_irqsave(&info->lock, flags);
	if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
		info->tx_count = 0;	/* staged bytes handed to DMA */
	spin_unlock_irqrestore(&info->lock, flags);
}
/*
* Service an IOCTL request
*
* Arguments
*
* tty pointer to tty instance data
* cmd IOCTL command code
* arg command argument/context
*
* Return 0 if success, otherwise error code
*/
/*
 * Service an IOCTL request
 *
 * Arguments
 *
 * 	tty	pointer to tty instance data
 * 	cmd	IOCTL command code
 * 	arg	command argument/context
 *
 * Return 0 if success, otherwise error code
 *
 * Commands that may block waiting for events are dispatched before
 * taking the port mutex; the remaining commands run under it.
 */
static int ioctl(struct tty_struct *tty,
		 unsigned int cmd, unsigned long arg)
{
	struct slgt_info *info = tty->driver_data;
	void __user *argp = (void __user *)arg;
	int ret;

	if (sanity_check(info, tty->name, "ioctl"))
		return -ENODEV;
	DBGINFO(("%s ioctl() cmd=%08X\n", info->device_name, cmd));

	/* most commands are refused while the line is in error state */
	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
	    (cmd != TIOCMIWAIT)) {
		if (tty->flags & (1 << TTY_IO_ERROR))
		    return -EIO;
	}

	/* potentially blocking commands — handled without the port mutex */
	switch (cmd) {
	case MGSL_IOCWAITEVENT:
		return wait_mgsl_event(info, argp);
	case TIOCMIWAIT:
		return modem_input_wait(info,(int)arg);
	case MGSL_IOCSGPIO:
		return set_gpio(info, argp);
	case MGSL_IOCGGPIO:
		return get_gpio(info, argp);
	case MGSL_IOCWAITGPIO:
		return wait_gpio(info, argp);
	case MGSL_IOCGXSYNC:
		return get_xsync(info, argp);
	case MGSL_IOCSXSYNC:
		return set_xsync(info, (int)arg);
	case MGSL_IOCGXCTRL:
		return get_xctrl(info, argp);
	case MGSL_IOCSXCTRL:
		return set_xctrl(info, (int)arg);
	}

	/* remaining commands run under the port mutex */
	mutex_lock(&info->port.mutex);
	switch (cmd) {
	case MGSL_IOCGPARAMS:
		ret = get_params(info, argp);
		break;
	case MGSL_IOCSPARAMS:
		ret = set_params(info, argp);
		break;
	case MGSL_IOCGTXIDLE:
		ret = get_txidle(info, argp);
		break;
	case MGSL_IOCSTXIDLE:
		ret = set_txidle(info, (int)arg);
		break;
	case MGSL_IOCTXENABLE:
		ret = tx_enable(info, (int)arg);
		break;
	case MGSL_IOCRXENABLE:
		ret = rx_enable(info, (int)arg);
		break;
	case MGSL_IOCTXABORT:
		ret = tx_abort(info);
		break;
	case MGSL_IOCGSTATS:
		ret = get_stats(info, argp);
		break;
	case MGSL_IOCGIF:
		ret = get_interface(info, argp);
		break;
	case MGSL_IOCSIF:
		ret = set_interface(info,(int)arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}
	mutex_unlock(&info->port.mutex);
	return ret;
}
static int get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct slgt_info *info = tty->driver_data;
struct mgsl_icount cnow; /* kernel counter temps */
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
cnow = info->icount;
spin_unlock_irqrestore(&info->lock,flags);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
icount->rng = cnow.rng;
icount->dcd = cnow.dcd;
icount->rx = cnow.rx;
icount->tx = cnow.tx;
icount->frame = cnow.frame;
icount->overrun = cnow.overrun;
icount->parity = cnow.parity;
icount->brk = cnow.brk;
icount->buf_overrun = cnow.buf_overrun;
return 0;
}
/*
* support for 32 bit ioctl calls on 64 bit systems
*/
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat read of the communication parameters: narrow each
 * field of info->params into a MGSL_PARAMS32 and copy it to user
 * space.  Returns 0 on success or -EFAULT.
 */
static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *user_params)
{
	struct MGSL_PARAMS32 p;

	DBGINFO(("%s get_params32\n", info->device_name));
	memset(&p, 0, sizeof(p));
	p.mode            = (compat_ulong_t)info->params.mode;
	p.loopback        = info->params.loopback;
	p.flags           = info->params.flags;
	p.encoding        = info->params.encoding;
	p.clock_speed     = (compat_ulong_t)info->params.clock_speed;
	p.addr_filter     = info->params.addr_filter;
	p.crc_type        = info->params.crc_type;
	p.preamble_length = info->params.preamble_length;
	p.preamble        = info->params.preamble;
	p.data_rate       = (compat_ulong_t)info->params.data_rate;
	p.data_bits       = info->params.data_bits;
	p.stop_bits       = info->params.stop_bits;
	p.parity          = info->params.parity;
	if (copy_to_user(user_params, &p, sizeof(struct MGSL_PARAMS32)))
		return -EFAULT;
	return 0;
}
/*
 * 32-bit compat write of the communication parameters.  A mode of
 * MGSL_MODE_BASE_CLOCK only updates the base clock frequency; any
 * other mode replaces the full parameter set.  The hardware is then
 * reprogrammed.  Returns 0 on success or -EFAULT.
 *
 * NOTE(review): takes info->lock with spin_lock() rather than
 * spin_lock_irqsave() as the rest of the file does — safe only if the
 * lock is never taken from IRQ context while this runs; confirm.
 */
static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *new_params)
{
	struct MGSL_PARAMS32 tmp_params;

	DBGINFO(("%s set_params32\n", info->device_name));
	if (copy_from_user(&tmp_params, new_params, sizeof(struct MGSL_PARAMS32)))
		return -EFAULT;

	spin_lock(&info->lock);
	if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) {
		/* special mode value: only change the base clock */
		info->base_clock = tmp_params.clock_speed;
	} else {
		info->params.mode            = tmp_params.mode;
		info->params.loopback        = tmp_params.loopback;
		info->params.flags           = tmp_params.flags;
		info->params.encoding        = tmp_params.encoding;
		info->params.clock_speed     = tmp_params.clock_speed;
		info->params.addr_filter     = tmp_params.addr_filter;
		info->params.crc_type        = tmp_params.crc_type;
		info->params.preamble_length = tmp_params.preamble_length;
		info->params.preamble        = tmp_params.preamble;
		info->params.data_rate       = tmp_params.data_rate;
		info->params.data_bits       = tmp_params.data_bits;
		info->params.stop_bits       = tmp_params.stop_bits;
		info->params.parity          = tmp_params.parity;
	}
	spin_unlock(&info->lock);

	program_hw(info);

	return 0;
}
/*
 * 32-bit compat ioctl entry point.  MGSL_IOC[GS]PARAMS32 need explicit
 * structure translation; every other supported command is layout-
 * compatible and is forwarded to the regular ioctl() handler.
 * Returns -ENOIOCTLCMD for unrecognized commands.
 */
static long slgt_compat_ioctl(struct tty_struct *tty,
			 unsigned int cmd, unsigned long arg)
{
	struct slgt_info *info = tty->driver_data;
	int rc = -ENOIOCTLCMD;

	if (sanity_check(info, tty->name, "compat_ioctl"))
		return -ENODEV;
	DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd));

	switch (cmd) {
	case MGSL_IOCSPARAMS32:
		rc = set_params32(info, compat_ptr(arg));
		break;

	case MGSL_IOCGPARAMS32:
		rc = get_params32(info, compat_ptr(arg));
		break;

	/* commands with identical 32/64-bit layout — forward directly */
	case MGSL_IOCGPARAMS:
	case MGSL_IOCSPARAMS:
	case MGSL_IOCGTXIDLE:
	case MGSL_IOCGSTATS:
	case MGSL_IOCWAITEVENT:
	case MGSL_IOCGIF:
	case MGSL_IOCSGPIO:
	case MGSL_IOCGGPIO:
	case MGSL_IOCWAITGPIO:
	case MGSL_IOCGXSYNC:
	case MGSL_IOCGXCTRL:
	case MGSL_IOCSTXIDLE:
	case MGSL_IOCTXENABLE:
	case MGSL_IOCRXENABLE:
	case MGSL_IOCTXABORT:
	case TIOCMIWAIT:
	case MGSL_IOCSIF:
	case MGSL_IOCSXSYNC:
	case MGSL_IOCSXCTRL:
		rc = ioctl(tty, cmd, arg);
		break;
	}

	DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc));
	return rc;
}
#else
#define slgt_compat_ioctl NULL
#endif /* ifdef CONFIG_COMPAT */
/*
* proc fs support
*/
/*
 * Write one device's status line(s) to the /proc seq_file: identity,
 * mode-specific statistics and current modem-control signal states.
 */
static inline void line_info(struct seq_file *m, struct slgt_info *info)
{
	char stat_buf[30];	/* holds up to "|RTS|CTS|DTR|DSR|CD|RI" */
	unsigned long flags;

	seq_printf(m, "%s: IO=%08X IRQ=%d MaxFrameSize=%u\n",
		      info->device_name, info->phys_reg_addr,
		      info->irq_level, info->max_frame_size);

	/* output current serial signal states */
	spin_lock_irqsave(&info->lock,flags);
	get_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);

	stat_buf[0] = 0;
	stat_buf[1] = 0;
	if (info->signals & SerialSignal_RTS)
		strcat(stat_buf, "|RTS");
	if (info->signals & SerialSignal_CTS)
		strcat(stat_buf, "|CTS");
	if (info->signals & SerialSignal_DTR)
		strcat(stat_buf, "|DTR");
	if (info->signals & SerialSignal_DSR)
		strcat(stat_buf, "|DSR");
	if (info->signals & SerialSignal_DCD)
		strcat(stat_buf, "|CD");
	if (info->signals & SerialSignal_RI)
		strcat(stat_buf, "|RI");

	/* mode-specific counters; zero-valued error counters are omitted */
	if (info->params.mode != MGSL_MODE_ASYNC) {
		seq_printf(m, "\tHDLC txok:%d rxok:%d",
			      info->icount.txok, info->icount.rxok);
		if (info->icount.txunder)
			seq_printf(m, " txunder:%d", info->icount.txunder);
		if (info->icount.txabort)
			seq_printf(m, " txabort:%d", info->icount.txabort);
		if (info->icount.rxshort)
			seq_printf(m, " rxshort:%d", info->icount.rxshort);
		if (info->icount.rxlong)
			seq_printf(m, " rxlong:%d", info->icount.rxlong);
		if (info->icount.rxover)
			seq_printf(m, " rxover:%d", info->icount.rxover);
		if (info->icount.rxcrc)
			seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
	} else {
		seq_printf(m, "\tASYNC tx:%d rx:%d",
			      info->icount.tx, info->icount.rx);
		if (info->icount.frame)
			seq_printf(m, " fe:%d", info->icount.frame);
		if (info->icount.parity)
			seq_printf(m, " pe:%d", info->icount.parity);
		if (info->icount.brk)
			seq_printf(m, " brk:%d", info->icount.brk);
		if (info->icount.overrun)
			seq_printf(m, " oe:%d", info->icount.overrun);
	}

	/* Append serial signal status to end (skip the leading '|') */
	seq_printf(m, " %s\n", stat_buf+1);

	seq_printf(m, "\ttxactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
		       info->tx_active,info->bh_requested,info->bh_running,
		       info->pending_bh);
}
/* Called to print information about devices
*/
/*
 * seq_file show handler: print one status section per device in the
 * global device list.
 */
static int synclink_gt_proc_show(struct seq_file *m, void *v)
{
	struct slgt_info *info;

	seq_puts(m, "synclink_gt driver\n");
	for (info = slgt_device_list; info; info = info->next_device)
		line_info(m, info);
	return 0;
}
/* seq_file open hook for the driver's proc entry */
static int synclink_gt_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, synclink_gt_proc_show, NULL);
}
/* proc entry file operations — standard single-shot seq_file wiring */
static const struct file_operations synclink_gt_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= synclink_gt_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
* return count of bytes in transmit buffer
*/
/*
 * return count of bytes in transmit buffer
 */
static int chars_in_buffer(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	int n;

	if (sanity_check(info, tty->name, "chars_in_buffer"))
		return 0;

	n = tbuf_bytes(info);
	DBGINFO(("%s chars_in_buffer()=%d\n", info->device_name, n));
	return n;
}
/*
* signal remote device to throttle send data (our receive data)
*/
/*
 * signal remote device to throttle send data (our receive data):
 * send XOFF when software flow control is enabled, drop RTS when
 * hardware flow control (CRTSCTS) is enabled.
 */
static void throttle(struct tty_struct * tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "throttle"))
		return;
	DBGINFO(("%s throttle\n", info->device_name));

	if (I_IXOFF(tty))
		send_xchar(tty, STOP_CHAR(tty));

	if (tty->termios->c_cflag & CRTSCTS) {
		spin_lock_irqsave(&info->lock, flags);
		info->signals &= ~SerialSignal_RTS;
		set_signals(info);
		spin_unlock_irqrestore(&info->lock, flags);
	}
}
/*
* signal remote device to stop throttling send data (our receive data)
*/
/*
 * signal remote device to stop throttling send data (our receive data):
 * cancel a pending XOFF or send XON when software flow control is
 * enabled, and raise RTS when hardware flow control is enabled.
 */
static void unthrottle(struct tty_struct * tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	if (sanity_check(info, tty->name, "unthrottle"))
		return;
	DBGINFO(("%s unthrottle\n", info->device_name));

	if (I_IXOFF(tty)) {
		if (info->x_char)
			info->x_char = 0;	/* XOFF not yet sent — cancel it */
		else
			send_xchar(tty, START_CHAR(tty));
	}

	if (tty->termios->c_cflag & CRTSCTS) {
		spin_lock_irqsave(&info->lock, flags);
		info->signals |= SerialSignal_RTS;
		set_signals(info);
		spin_unlock_irqrestore(&info->lock, flags);
	}
}
/*
* set or clear transmit break condition
* break_state -1=set break condition, 0=clear
*/
/*
 * set or clear transmit break condition
 * break_state	-1=set break condition, 0=clear
 * Returns 0, or -EINVAL if the device context is invalid.
 */
static int set_break(struct tty_struct *tty, int break_state)
{
	struct slgt_info *info = tty->driver_data;
	unsigned short tcr;
	unsigned long flags;

	if (sanity_check(info, tty->name, "set_break"))
		return -EINVAL;
	DBGINFO(("%s set_break(%d)\n", info->device_name, break_state));

	spin_lock_irqsave(&info->lock, flags);
	tcr = rd_reg16(info, TCR);
	if (break_state == -1)
		tcr |= BIT6;	/* TCR BIT6 = send break */
	else
		tcr &= ~BIT6;
	wr_reg16(info, TCR, tcr);
	spin_unlock_irqrestore(&info->lock, flags);

	return 0;
}
#if SYNCLINK_GENERIC_HDLC
/**
* called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
* set encoding and frame check sequence (FCS) options
*
* dev pointer to network device structure
* encoding serial encoding setting
* parity FCS setting
*
* returns 0 if success, otherwise error code
*/
/**
 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
 * set encoding and frame check sequence (FCS) options
 *
 * dev       pointer to network device structure
 * encoding  serial encoding setting
 * parity    FCS setting
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
			  unsigned short parity)
{
	struct slgt_info *info = dev_to_port(dev);
	unsigned char  new_encoding;
	unsigned short new_crctype;

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	DBGINFO(("%s hdlcdev_attach\n", info->device_name));

	/* map generic HDLC encoding constants to driver constants;
	 * both switches validate before anything is modified */
	switch (encoding)
	{
	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
	default: return -EINVAL;
	}

	switch (parity)
	{
	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
	default: return -EINVAL;
	}

	info->params.encoding = new_encoding;
	info->params.crc_type = new_crctype;

	/* if network interface up, reprogram hardware */
	if (info->netcount)
		program_hw(info);

	return 0;
}
/**
* called by generic HDLC layer to send frame
*
* skb socket buffer containing HDLC frame
* dev pointer to network device structure
*/
/**
 * called by generic HDLC layer to send frame
 *
 * skb  socket buffer containing HDLC frame
 * dev  pointer to network device structure
 */
static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct slgt_info *info = dev_to_port(dev);
	unsigned long flags;

	DBGINFO(("%s hdlc_xmit\n", dev->name));

	if (!skb->len)
		return NETDEV_TX_OK;

	/* stop sending until this frame completes */
	netif_stop_queue(dev);

	/* update network statistics */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* save start time for transmit timeout detection */
	dev->trans_start = jiffies;

	spin_lock_irqsave(&info->lock, flags);
	/* NOTE(review): tx_load()'s return value is ignored here — a frame
	 * that fails to load is silently dropped (stats already counted);
	 * confirm this is the intended best-effort behavior */
	tx_load(info, skb->data, skb->len);
	spin_unlock_irqrestore(&info->lock, flags);

	/* done with socket buffer, so free it */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
/**
 * called by network layer when interface enabled
 * claim resources and initialize hardware
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_open(struct net_device *dev)
{
	struct slgt_info *info = dev_to_port(dev);
	int rc;
	unsigned long flags;

	/* hold a module reference while the network interface is active */
	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	DBGINFO(("%s hdlcdev_open\n", dev->name));

	/* generic HDLC layer open processing */
	if ((rc = hdlc_open(dev))) {
		/* BUG FIX: drop the module reference taken above on failure */
		module_put(THIS_MODULE);
		return rc;
	}

	/* arbitrate between network and tty opens */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->port.count != 0 || info->netcount != 0) {
		DBGINFO(("%s hdlc_open busy\n", dev->name));
		spin_unlock_irqrestore(&info->netlock, flags);
		/* BUG FIX: undo hdlc_open() and release the module reference;
		 * the stack will not call ndo_stop after a failed ndo_open */
		hdlc_close(dev);
		module_put(THIS_MODULE);
		return -EBUSY;
	}
	info->netcount=1;
	spin_unlock_irqrestore(&info->netlock, flags);

	/* claim resources and init adapter */
	if ((rc = startup(info)) != 0) {
		spin_lock_irqsave(&info->netlock, flags);
		info->netcount=0;
		spin_unlock_irqrestore(&info->netlock, flags);
		/* BUG FIX: undo hdlc_open() and release the module reference */
		hdlc_close(dev);
		module_put(THIS_MODULE);
		return rc;
	}

	/* assert DTR and RTS, apply hardware settings */
	info->signals |= SerialSignal_RTS + SerialSignal_DTR;
	program_hw(info);

	/* enable network layer transmit */
	dev->trans_start = jiffies;
	netif_start_queue(dev);

	/* inform generic HDLC layer of current DCD status */
	spin_lock_irqsave(&info->lock, flags);
	get_signals(info);
	spin_unlock_irqrestore(&info->lock, flags);
	if (info->signals & SerialSignal_DCD)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}
/**
 * called by network layer when interface is disabled
 * shutdown hardware and release resources
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_close(struct net_device *dev)
{
	struct slgt_info *info = dev_to_port(dev);
	unsigned long irq_flags;

	DBGINFO(("%s hdlcdev_close\n", dev->name));

	netif_stop_queue(dev);

	/* shutdown adapter and release resources */
	shutdown(info);
	hdlc_close(dev);

	/* mark network interface as closed so tty opens are allowed again */
	spin_lock_irqsave(&info->netlock, irq_flags);
	info->netcount = 0;
	spin_unlock_irqrestore(&info->netlock, irq_flags);

	/* release module reference taken in hdlcdev_open() */
	module_put(THIS_MODULE);
	return 0;
}
/**
 * called by network layer to process IOCTL call to network device
 *
 * dev  pointer to network device structure
 * ifr  pointer to network interface request structure
 * cmd  IOCTL command code
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct slgt_info *info = dev_to_port(dev);
	unsigned int flags;

	DBGINFO(("%s hdlcdev_ioctl\n", dev->name));

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	/* only SIOCWANDEV is handled here, everything else goes to
	 * the generic HDLC layer */
	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	memset(&new_line, 0, sizeof(new_line));

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE: /* return current sync_serial_settings */

		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}

		/* isolate the current clock source selection bits */
		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);

		/* map driver clock flag combinations to generic HDLC clock types */
		switch (flags){
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
		case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
		default: new_line.clock_type = CLOCK_DEFAULT;
		}

		new_line.clock_rate = info->params.clock_speed;
		new_line.loopback = info->params.loopback ? 1:0;

		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */

		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		/* map generic HDLC clock type to driver clock flags */
		switch (new_line.clock_type)
		{
		case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
		case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
		case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
		case CLOCK_DEFAULT: flags = info->params.flags &
				    (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
				     HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
				     HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
				     HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
		default: return -EINVAL;
		}

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		/* clear old clock source bits and install the new selection */
		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
		info->params.flags |= flags;

		info->params.loopback = new_line.loopback;

		/* clock rate is only meaningful when the baud rate
		 * generator (BRG) is used for rx or tx */
		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
			info->params.clock_speed = new_line.clock_rate;
		else
			info->params.clock_speed = 0;

		/* if network interface up, reprogram hardware */
		if (info->netcount)
			program_hw(info);
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
/**
 * called by network layer when transmit timeout is detected
 *
 * dev  pointer to network device structure
 */
static void hdlcdev_tx_timeout(struct net_device *dev)
{
	struct slgt_info *info = dev_to_port(dev);
	unsigned long irq_flags;

	DBGINFO(("%s hdlcdev_tx_timeout\n", dev->name));

	/* record the failed transmission */
	dev->stats.tx_errors++;
	dev->stats.tx_aborted_errors++;

	/* abort the transmitter under the device lock */
	spin_lock_irqsave(&info->lock, irq_flags);
	tx_stop(info);
	spin_unlock_irqrestore(&info->lock, irq_flags);

	/* allow the network layer to queue new frames */
	netif_wake_queue(dev);
}
/**
 * called by device driver when transmit completes
 * reenable network layer transmit if stopped
 *
 * info  pointer to device instance information
 */
static void hdlcdev_tx_done(struct slgt_info *info)
{
	struct net_device *dev = info->netdev;

	/* restart the queue if it was stopped waiting for this frame */
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
}
/**
 * called by device driver when frame received
 * pass frame to network layer
 *
 * info  pointer to device instance information
 * buf   pointer to buffer containing frame data
 * size  count of data bytes in buf
 */
static void hdlcdev_rx(struct slgt_info *info, char *buf, int size)
{
	struct net_device *dev = info->netdev;
	struct sk_buff *skb;

	DBGINFO(("%s hdlcdev_rx\n", dev->name));

	skb = dev_alloc_skb(size);
	if (!skb) {
		DBGERR(("%s: can't alloc skb, drop packet\n", dev->name));
		dev->stats.rx_dropped++;
		return;
	}

	/* copy frame data into the socket buffer and tag the protocol */
	memcpy(skb_put(skb, size), buf, size);
	skb->protocol = hdlc_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += size;

	netif_rx(skb);
}
/* network device callbacks implemented by this driver; mtu changes and
 * frame transmit are delegated to the generic HDLC layer helpers */
static const struct net_device_ops hdlcdev_ops = {
	.ndo_open       = hdlcdev_open,
	.ndo_stop       = hdlcdev_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hdlcdev_ioctl,
	.ndo_tx_timeout = hdlcdev_tx_timeout,
};
/**
 * called by device driver when adding device instance
 * do generic HDLC initialization
 *
 * info  pointer to device instance information
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_init(struct slgt_info *info)
{
	struct net_device *dev;
	hdlc_device *hdlc;
	int rc;

	/* allocate and initialize network and HDLC layer objects */
	dev = alloc_hdlcdev(info);
	if (!dev) {
		printk(KERN_ERR "%s hdlc device alloc failure\n", info->device_name);
		return -ENOMEM;
	}

	/* for network layer reporting purposes only */
	dev->mem_start = info->phys_reg_addr;
	dev->mem_end   = info->phys_reg_addr + SLGT_REG_SIZE - 1;
	dev->irq       = info->irq_level;

	/* network layer callbacks and settings */
	dev->netdev_ops     = &hdlcdev_ops;
	dev->watchdog_timeo = 10 * HZ;
	dev->tx_queue_len   = 50;

	/* generic HDLC layer callbacks and settings */
	hdlc = dev_to_hdlc(dev);
	hdlc->attach = hdlcdev_attach;
	hdlc->xmit   = hdlcdev_xmit;

	/* register objects with HDLC layer */
	rc = register_hdlc_device(dev);
	if (rc) {
		printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
		free_netdev(dev);
		return rc;
	}

	info->netdev = dev;
	return 0;
}
/**
 * called by device driver when removing device instance
 * do generic HDLC cleanup
 *
 * info  pointer to device instance information
 */
static void hdlcdev_exit(struct slgt_info *info)
{
	struct net_device *dev = info->netdev;

	/* unregister from the HDLC layer and release the device object */
	unregister_hdlc_device(dev);
	free_netdev(dev);
	info->netdev = NULL;
}
#endif /* ifdef CONFIG_HDLC */
/*
 * get async data from rx DMA buffers
 *
 * In async mode each data byte in the rx buffer is followed by a
 * status byte (see isr_rxdata), so the buffer is walked two bytes
 * at a time. Received characters are pushed to the tty flip buffer.
 */
static void rx_async(struct slgt_info *info)
{
	struct tty_struct *tty = info->port.tty;
	struct mgsl_icount *icount = &info->icount;
	unsigned int start, end;
	unsigned char *p;
	unsigned char status;
	struct slgt_desc *bufs = info->rbufs;
	int i, count;
	int chars = 0;		/* chars queued to the tty flip buffer */
	int stat;
	unsigned char ch;

	start = end = info->rbuf_current;

	/* process each completed receive DMA buffer in ring order */
	while(desc_complete(bufs[end])) {
		count = desc_count(bufs[end]) - info->rbuf_index;
		p = bufs[end].buf + info->rbuf_index;

		DBGISR(("%s rx_async count=%d\n", info->device_name, count));
		DBGDATA(info, p, count, "rx");

		/* each entry is a data byte followed by a status byte */
		for(i=0 ; i < count; i+=2, p+=2) {
			ch = *p;
			icount->rx++;

			stat = 0;

			/* status byte: BIT1 = parity error, BIT0 = framing error */
			if ((status = *(p+1) & (BIT1 + BIT0))) {
				if (status & BIT1)
					icount->parity++;
				else if (status & BIT0)
					icount->frame++;
				/* discard char if tty control flags say so */
				if (status & info->ignore_status_mask)
					continue;
				if (status & BIT1)
					stat = TTY_PARITY;
				else if (status & BIT0)
					stat = TTY_FRAME;
			}
			if (tty) {
				tty_insert_flip_char(tty, ch, stat);
				chars++;
			}
		}

		if (i < count) {
			/* receive buffer not completed */
			/* NOTE(review): the for loop above has no break, so i >= count
			 * always holds here and this branch looks unreachable - confirm */
			info->rbuf_index += i;
			mod_timer(&info->rx_timer, jiffies + 1);
			break;
		}

		/* buffer fully consumed, return it to the DMA ring */
		info->rbuf_index = 0;
		free_rbufs(info, end, end);

		if (++end == info->rbuf_count)
			end = 0;

		/* if entire list searched then no frame available */
		if (end == start)
			break;
	}

	if (tty && chars)
		tty_flip_buffer_push(tty);
}
/*
 * return next bottom half action to perform
 */
static int bh_action(struct slgt_info *info)
{
	unsigned long irq_flags;
	int action = 0;

	spin_lock_irqsave(&info->lock, irq_flags);

	/* pick pending work in priority order: receive, transmit, status */
	if (info->pending_bh & BH_RECEIVE)
		action = BH_RECEIVE;
	else if (info->pending_bh & BH_TRANSMIT)
		action = BH_TRANSMIT;
	else if (info->pending_bh & BH_STATUS)
		action = BH_STATUS;

	if (action) {
		/* consume the selected pending bit */
		info->pending_bh &= ~action;
	} else {
		/* Mark BH routine as complete */
		info->bh_running = false;
		info->bh_requested = false;
	}

	spin_unlock_irqrestore(&info->lock, irq_flags);

	return action;
}
/*
 * perform bottom half processing
 *
 * Runs as deferred work scheduled from the interrupt handler; drains
 * pending actions reported by bh_action() until none remain.
 */
static void bh_handler(struct work_struct *work)
{
	struct slgt_info *info = container_of(work, struct slgt_info, task);
	int action;

	if (!info)
		return;
	info->bh_running = true;

	while((action = bh_action(info))) {
		switch (action) {
		case BH_RECEIVE:
			DBGBH(("%s bh receive\n", info->device_name));
			/* receive processing depends on the current serial mode */
			switch(info->params.mode) {
			case MGSL_MODE_ASYNC:
				rx_async(info);
				break;
			case MGSL_MODE_HDLC:
				while(rx_get_frame(info));
				break;
			case MGSL_MODE_RAW:
			case MGSL_MODE_MONOSYNC:
			case MGSL_MODE_BISYNC:
			case MGSL_MODE_XSYNC:
				while(rx_get_buf(info));
				break;
			}
			/* restart receiver if rx DMA buffers exhausted */
			if (info->rx_restart)
				rx_start(info);
			break;
		case BH_TRANSMIT:
			bh_transmit(info);
			break;
		case BH_STATUS:
			DBGBH(("%s bh status\n", info->device_name));
			/* reset the per-signal counters used by the *_change()
			 * handlers to throttle interrupt storms */
			info->ri_chkcount = 0;
			info->dsr_chkcount = 0;
			info->dcd_chkcount = 0;
			info->cts_chkcount = 0;
			break;
		default:
			DBGBH(("%s unknown action\n", info->device_name));
			break;
		}
	}
	DBGBH(("%s bh_handler exit\n", info->device_name));
}
/* transmit bottom half: wake processes waiting to write to the tty */
static void bh_transmit(struct slgt_info *info)
{
	struct tty_struct *tty = info->port.tty;

	DBGBH(("%s bh_transmit\n", info->device_name));

	if (tty)
		tty_wakeup(tty);
}
/* handle a DSR transition reported in the serial status register */
static void dsr_change(struct slgt_info *info, unsigned short status)
{
	/* BIT3 of status reflects the current DSR level */
	if (status & BIT3) {
		info->signals |= SerialSignal_DSR;
		info->input_signal_events.dsr_up++;
	} else {
		info->signals &= ~SerialSignal_DSR;
		info->input_signal_events.dsr_down++;
	}
	DBGISR(("dsr_change %s signals=%04X\n", info->device_name, info->signals));
	/* too many changes: disable the DSR interrupt to stop an interrupt
	 * storm; the counter is reset by the BH_STATUS bottom half */
	if ((info->dsr_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
		slgt_irq_off(info, IRQ_DSR);
		return;
	}
	info->icount.dsr++;
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);
	info->pending_bh |= BH_STATUS;
}
/* handle a CTS transition reported in the serial status register */
static void cts_change(struct slgt_info *info, unsigned short status)
{
	/* BIT2 of status reflects the current CTS level */
	if (status & BIT2) {
		info->signals |= SerialSignal_CTS;
		info->input_signal_events.cts_up++;
	} else {
		info->signals &= ~SerialSignal_CTS;
		info->input_signal_events.cts_down++;
	}
	DBGISR(("cts_change %s signals=%04X\n", info->device_name, info->signals));
	/* too many changes: disable the CTS interrupt to stop an interrupt
	 * storm; the counter is reset by the BH_STATUS bottom half */
	if ((info->cts_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
		slgt_irq_off(info, IRQ_CTS);
		return;
	}
	info->icount.cts++;
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);
	info->pending_bh |= BH_STATUS;

	/* CTS hardware flow control for the tty */
	if (info->port.flags & ASYNC_CTS_FLOW) {
		if (info->port.tty) {
			if (info->port.tty->hw_stopped) {
				/* CTS reasserted: resume transmission */
				if (info->signals & SerialSignal_CTS) {
					info->port.tty->hw_stopped = 0;
					info->pending_bh |= BH_TRANSMIT;
					return;
				}
			} else {
				/* CTS dropped: stop transmission */
				if (!(info->signals & SerialSignal_CTS))
					info->port.tty->hw_stopped = 1;
			}
		}
	}
}
/* handle a DCD (carrier detect) transition reported in the status register */
static void dcd_change(struct slgt_info *info, unsigned short status)
{
	/* BIT1 of status reflects the current DCD level */
	if (status & BIT1) {
		info->signals |= SerialSignal_DCD;
		info->input_signal_events.dcd_up++;
	} else {
		info->signals &= ~SerialSignal_DCD;
		info->input_signal_events.dcd_down++;
	}
	DBGISR(("dcd_change %s signals=%04X\n", info->device_name, info->signals));
	/* too many changes: disable the DCD interrupt to stop an interrupt
	 * storm; the counter is reset by the BH_STATUS bottom half */
	if ((info->dcd_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
		slgt_irq_off(info, IRQ_DCD);
		return;
	}
	info->icount.dcd++;
#if SYNCLINK_GENERIC_HDLC
	/* DCD doubles as carrier for the HDLC network interface */
	if (info->netcount) {
		if (info->signals & SerialSignal_DCD)
			netif_carrier_on(info->netdev);
		else
			netif_carrier_off(info->netdev);
	}
#endif
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);
	info->pending_bh |= BH_STATUS;

	/* wake blocked opens on carrier up, hang up the tty on carrier loss */
	if (info->port.flags & ASYNC_CHECK_CD) {
		if (info->signals & SerialSignal_DCD)
			wake_up_interruptible(&info->port.open_wait);
		else {
			if (info->port.tty)
				tty_hangup(info->port.tty);
		}
	}
}
/* handle a RI (ring indicator) transition reported in the status register */
static void ri_change(struct slgt_info *info, unsigned short status)
{
	/* BIT0 of status reflects the current RI level */
	if (status & BIT0) {
		info->signals |= SerialSignal_RI;
		info->input_signal_events.ri_up++;
	} else {
		info->signals &= ~SerialSignal_RI;
		info->input_signal_events.ri_down++;
	}
	DBGISR(("ri_change %s signals=%04X\n", info->device_name, info->signals));
	/* too many changes: disable the RI interrupt to stop an interrupt
	 * storm; the counter is reset by the BH_STATUS bottom half */
	if ((info->ri_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
		slgt_irq_off(info, IRQ_RI);
		return;
	}
	info->icount.rng++;
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);
	info->pending_bh |= BH_STATUS;
}
/* PIO receive: drain the receive data register into the rx buffers
 * while the status register flags receive data available */
static void isr_rxdata(struct slgt_info *info)
{
	unsigned int count = info->rbuf_fill_count;	/* bytes in current buffer */
	unsigned int i = info->rbuf_fill_index;		/* current buffer index */
	unsigned short reg;

	while (rd_reg16(info, SSR) & IRQ_RXDATA) {
		reg = rd_reg16(info, RDR);
		DBGISR(("isr_rxdata %s RDR=%04X\n", info->device_name, reg));
		if (desc_complete(info->rbufs[i])) {
			/* all buffers full */
			rx_stop(info);
			info->rx_restart = 1;
			continue;
		}
		/* low byte of RDR is the data byte */
		info->rbufs[i].buf[count++] = (unsigned char)reg;
		/* async mode saves status byte to buffer for each data byte */
		if (info->params.mode == MGSL_MODE_ASYNC)
			info->rbufs[i].buf[count++] = (unsigned char)(reg >> 8);
		if (count == info->rbuf_fill_level || (reg & BIT10)) {
			/* buffer full or end of frame */
			set_desc_count(info->rbufs[i], count);
			set_desc_status(info->rbufs[i], BIT15 | (reg >> 8));
			info->rbuf_fill_count = count = 0;
			if (++i == info->rbuf_count)
				i = 0;
			info->pending_bh |= BH_RECEIVE;
		}
	}

	/* remember fill position for the next call */
	info->rbuf_fill_index = i;
	info->rbuf_fill_count = count;
}
/* service interrupts reported in the serial status register (SSR)
 * for one port: tx completion, rx data/break/idle, modem signals */
static void isr_serial(struct slgt_info *info)
{
	unsigned short status = rd_reg16(info, SSR);

	DBGISR(("%s isr_serial status=%04X\n", info->device_name, status));

	wr_reg16(info, SSR, status); /* clear pending */

	info->irq_occurred = true;

	if (info->params.mode == MGSL_MODE_ASYNC) {
		/* async: transmitter idle only matters while tx is active */
		if (status & IRQ_TXIDLE) {
			if (info->tx_active)
				isr_txeom(info, status);
		}
		if (info->rx_pio && (status & IRQ_RXDATA))
			isr_rxdata(info);
		if ((status & IRQ_RXBREAK) && (status & RXBREAK)) {
			info->icount.brk++;
			/* process break detection if tty control allows */
			if (info->port.tty) {
				if (!(status & info->ignore_status_mask)) {
					if (info->read_status_mask & MASK_BREAK) {
						tty_insert_flip_char(info->port.tty, 0, TTY_BREAK);
						if (info->port.flags & ASYNC_SAK)
							do_SAK(info->port.tty);
					}
				}
			}
		}
	} else {
		/* sync modes: idle or underrun both end the frame */
		if (status & (IRQ_TXIDLE + IRQ_TXUNDER))
			isr_txeom(info, status);
		if (info->rx_pio && (status & IRQ_RXDATA))
			isr_rxdata(info);
		if (status & IRQ_RXIDLE) {
			if (status & RXIDLE)
				info->icount.rxidle++;
			else
				info->icount.exithunt++;
			wake_up_interruptible(&info->event_wait_q);
		}

		/* receiver overrun: restart receiver */
		if (status & IRQ_RXOVER)
			rx_start(info);
	}

	/* modem control signal transitions */
	if (status & IRQ_DSR)
		dsr_change(info, status);
	if (status & IRQ_CTS)
		cts_change(info, status);
	if (status & IRQ_DCD)
		dcd_change(info, status);
	if (status & IRQ_RI)
		ri_change(info, status);
}
/* service a receive DMA channel interrupt for one port */
static void isr_rdma(struct slgt_info *info)
{
	unsigned int status = rd_reg32(info, RDCSR);

	DBGISR(("%s isr_rdma status=%08x\n", info->device_name, status));

	/* RDCSR (rx DMA control/status)
	 *
	 * 31..07  reserved
	 * 06      save status byte to DMA buffer
	 * 05      error
	 * 04      eol (end of list)
	 * 03      eob (end of buffer)
	 * 02      IRQ enable
	 * 01      reset
	 * 00      enable
	 */
	wr_reg32(info, RDCSR, status);	/* clear pending */

	/* error or end of buffer list: flag receiver for restart */
	if (status & (BIT5 + BIT4)) {
		DBGISR(("%s isr_rdma rx_restart=1\n", info->device_name));
		info->rx_restart = true;
	}
	info->pending_bh |= BH_RECEIVE;
}
/* service a transmit DMA channel interrupt for one port */
static void isr_tdma(struct slgt_info *info)
{
	unsigned int status = rd_reg32(info, TDCSR);

	DBGISR(("%s isr_tdma status=%08x\n", info->device_name, status));

	/* TDCSR (tx DMA control/status)
	 *
	 * 31..06  reserved
	 * 05      error
	 * 04      eol (end of list)
	 * 03      eob (end of buffer)
	 * 02      IRQ enable
	 * 01      reset
	 * 00      enable
	 */
	wr_reg32(info, TDCSR, status);	/* clear pending */

	if (status & (BIT5 + BIT4 + BIT3)) {
		// another transmit buffer has completed
		// run bottom half to get more send data from user
		info->pending_bh |= BH_TRANSMIT;
	}
}
/*
 * return true if there are unsent tx DMA buffers, otherwise false
 *
 * if there are unsent buffers then info->tbuf_start
 * is set to index of first unsent buffer
 */
static bool unsent_tbufs(struct slgt_info *info)
{
	unsigned int i = info->tbuf_current;
	bool rc = false;

	/*
	 * search backwards from last loaded buffer (precedes tbuf_current)
	 * for first unsent buffer (desc_count > 0)
	 */

	do {
		/* step backwards through the ring with wrap-around */
		if (i)
			i--;
		else
			i = info->tbuf_count - 1;
		/* zero count marks a sent buffer: stop at the first one */
		if (!desc_count(info->tbufs[i]))
			break;
		/* remember earliest unsent buffer found so far */
		info->tbuf_start = i;
		rc = true;
	} while (i != info->tbuf_current);

	return rc;
}
/* handle transmit end-of-message (idle or underrun) for one port */
static void isr_txeom(struct slgt_info *info, unsigned short status)
{
	DBGISR(("%s txeom status=%04x\n", info->device_name, status));

	/* quiesce transmit interrupts and DMA before post-processing */
	slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
	tdma_reset(info);
	if (status & IRQ_TXUNDER) {
		/* underrun: reset the transmitter by pulsing TCR BIT2 */
		unsigned short val = rd_reg16(info, TCR);
		wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
		wr_reg16(info, TCR, val); /* clear reset bit */
	}

	if (info->tx_active) {
		if (info->params.mode != MGSL_MODE_ASYNC) {
			if (status & IRQ_TXUNDER)
				info->icount.txunder++;
			else if (status & IRQ_TXIDLE)
				info->icount.txok++;
		}

		/* restart transmission if more loaded buffers remain */
		if (unsent_tbufs(info)) {
			tx_start(info);
			update_tx_timer(info);
			return;
		}
		info->tx_active = false;

		del_timer(&info->tx_timer);

		/* in sync modes, drop RTS after transmit if it was requested */
		if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
			info->signals &= ~SerialSignal_RTS;
			info->drop_rts_on_tx_done = false;
			set_signals(info);
		}

#if SYNCLINK_GENERIC_HDLC
		if (info->netcount)
			hdlcdev_tx_done(info);
		else
#endif
		{
			/* tty path: stay stopped if flow control says so,
			 * otherwise schedule the transmit bottom half */
			if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) {
				tx_stop(info);
				return;
			}
			info->pending_bh |= BH_TRANSMIT;
		}
	}
}
/* wake processes waiting for GPIO transitions
 *
 * changed  bitmask of signals that changed
 * state    latched state of the GPIO signals
 */
static void isr_gpio(struct slgt_info *info, unsigned int changed, unsigned int state)
{
	struct cond_wait *w, *prev;

	/* wake processes waiting for specific transitions */
	for (w = info->gpio_wait_q, prev = NULL ; w != NULL ; w = w->next) {
		if (w->data & changed) {
			/* matched: store the latched state for the waiter,
			 * wake it, and unlink it from the singly linked list
			 * (prev is intentionally NOT advanced here) */
			w->data = state;
			wake_up_interruptible(&w->q);
			if (prev != NULL)
				prev->next = w->next;
			else
				info->gpio_wait_q = w->next;
		} else
			prev = w;
	}
}
/* interrupt service routine
 *
 * irq     interrupt number
 * dev_id  device ID supplied during interrupt registration
 */
static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
{
	struct slgt_info *info = dev_id;
	unsigned int gsr;
	unsigned int i;

	DBGISR(("slgt_interrupt irq=%d entry\n", info->irq_level));

	/* loop while the global status register reports pending port IRQs */
	while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
		DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
		info->irq_occurred = true;
		/* dispatch serial / rx DMA / tx DMA events per port;
		 * GSR bit layout: BIT8+i serial, BIT16/BIT17 pairs per port DMA */
		for(i=0; i < info->port_count ; i++) {
			if (info->port_array[i] == NULL)
				continue;
			spin_lock(&info->port_array[i]->lock);
			if (gsr & (BIT8 << i))
				isr_serial(info->port_array[i]);
			if (gsr & (BIT16 << (i*2)))
				isr_rdma(info->port_array[i]);
			if (gsr & (BIT17 << (i*2)))
				isr_tdma(info->port_array[i]);
			spin_unlock(&info->port_array[i]->lock);
		}
	}

	if (info->gpio_present) {
		unsigned int state;
		unsigned int changed;
		spin_lock(&info->lock);
		while ((changed = rd_reg32(info, IOSR)) != 0) {
			DBGISR(("%s iosr=%08x\n", info->device_name, changed));
			/* read latched state of GPIO signals */
			state = rd_reg32(info, IOVR);
			/* clear pending GPIO interrupt bits */
			wr_reg32(info, IOSR, changed);
			for (i=0 ; i < info->port_count ; i++) {
				if (info->port_array[i] != NULL)
					isr_gpio(info->port_array[i], changed, state);
			}
		}
		spin_unlock(&info->lock);
	}

	/* schedule bottom half work for any open port that needs it
	 * and does not already have work running or queued */
	for(i=0; i < info->port_count ; i++) {
		struct slgt_info *port = info->port_array[i];
		if (port == NULL)
			continue;
		spin_lock(&port->lock);
		if ((port->port.count || port->netcount) &&
		    port->pending_bh && !port->bh_running &&
		    !port->bh_requested) {
			DBGISR(("%s bh queued\n", port->device_name));
			schedule_work(&port->task);
			port->bh_requested = true;
		}
		spin_unlock(&port->lock);
	}

	DBGISR(("slgt_interrupt irq=%d exit\n", info->irq_level));
	return IRQ_HANDLED;
}
/* claim resources and initialize the hardware for an opening port
 *
 * returns 0 on success, -ENOMEM if the tx buffer cannot be allocated
 */
static int startup(struct slgt_info *info)
{
	DBGINFO(("%s startup\n", info->device_name));

	/* nothing to do if the port is already initialized */
	if (info->port.flags & ASYNC_INITIALIZED)
		return 0;

	/* allocate transmit staging buffer on first initialization */
	if (!info->tx_buf) {
		info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
		if (!info->tx_buf) {
			DBGERR(("%s can't allocate tx buffer\n", info->device_name));
			return -ENOMEM;
		}
	}

	info->pending_bh = 0;

	memset(&info->icount, 0, sizeof(info->icount));

	/* program hardware for current parameters */
	change_params(info);

	if (info->port.tty)
		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);

	info->port.flags |= ASYNC_INITIALIZED;

	return 0;
}
/*
 * called by close() and hangup() to shutdown hardware
 */
static void shutdown(struct slgt_info *info)
{
	unsigned long flags;

	/* nothing to do if never initialized */
	if (!(info->port.flags & ASYNC_INITIALIZED))
		return;

	DBGINFO(("%s shutdown\n", info->device_name));

	/* clear status wait queue because status changes */
	/* can't happen after shutting down the hardware */
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);

	del_timer_sync(&info->tx_timer);
	del_timer_sync(&info->rx_timer);

	kfree(info->tx_buf);
	info->tx_buf = NULL;

	spin_lock_irqsave(&info->lock,flags);

	tx_stop(info);
	rx_stop(info);

	slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);

	/* drop DTR/RTS unless the tty requested they be kept (no HUPCL) */
	if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
		info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
		set_signals(info);
	}

	/* wake any remaining GPIO waiters, status can no longer change */
	flush_cond_wait(&info->gpio_wait_q);

	spin_unlock_irqrestore(&info->lock,flags);

	if (info->port.tty)
		set_bit(TTY_IO_ERROR, &info->port.tty->flags);

	info->port.flags &= ~ASYNC_INITIALIZED;
}
/* program the hardware for the current parameters */
static void program_hw(struct slgt_info *info)
{
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);

	/* quiesce both directions before reprogramming */
	rx_stop(info);
	tx_stop(info);

	/* the network (HDLC) path always uses sync mode */
	if (info->params.mode != MGSL_MODE_ASYNC ||
	    info->netcount)
		sync_mode(info);
	else
		async_mode(info);

	set_signals(info);

	/* restart the interrupt storm counters for the modem signals */
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;

	slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
	get_signals(info);

	/* enable receiver if the netdev is up or the tty wants input (CREAD) */
	if (info->netcount ||
	    (info->port.tty && info->port.tty->termios->c_cflag & CREAD))
		rx_start(info);

	spin_unlock_irqrestore(&info->lock,flags);
}
/*
 * reconfigure adapter based on new parameters
 *
 * Translates the tty termios settings into driver parameters and
 * applies them to the hardware via program_hw().
 */
static void change_params(struct slgt_info *info)
{
	unsigned cflag;
	int bits_per_char;

	if (!info->port.tty || !info->port.tty->termios)
		return;
	DBGINFO(("%s change_params\n", info->device_name));

	cflag = info->port.tty->termios->c_cflag;

	/* if B0 rate (hangup) specified then negate DTR and RTS */
	/* otherwise assert DTR and RTS */
	if (cflag & CBAUD)
		info->signals |= SerialSignal_RTS + SerialSignal_DTR;
	else
		info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);

	/* byte size and parity */
	switch (cflag & CSIZE) {
	case CS5: info->params.data_bits = 5; break;
	case CS6: info->params.data_bits = 6; break;
	case CS7: info->params.data_bits = 7; break;
	case CS8: info->params.data_bits = 8; break;
	default: info->params.data_bits = 7; break;
	}

	info->params.stop_bits = (cflag & CSTOPB) ? 2 : 1;

	if (cflag & PARENB)
		info->params.parity = (cflag & PARODD) ? ASYNC_PARITY_ODD : ASYNC_PARITY_EVEN;
	else
		info->params.parity = ASYNC_PARITY_NONE;

	/* calculate number of jiffies to transmit a full
	 * FIFO (32 bytes) at specified data rate
	 */
	bits_per_char = info->params.data_bits +
			info->params.stop_bits + 1;

	info->params.data_rate = tty_get_baud_rate(info->port.tty);

	if (info->params.data_rate) {
		info->timeout = (32*HZ*bits_per_char) /
				info->params.data_rate;
	}
	info->timeout += HZ/50;		/* Add .02 seconds of slop */

	/* CRTSCTS enables CTS hardware flow control */
	if (cflag & CRTSCTS)
		info->port.flags |= ASYNC_CTS_FLOW;
	else
		info->port.flags &= ~ASYNC_CTS_FLOW;

	/* CLOCAL means ignore carrier (DCD) */
	if (cflag & CLOCAL)
		info->port.flags &= ~ASYNC_CHECK_CD;
	else
		info->port.flags |= ASYNC_CHECK_CD;

	/* process tty input control flags */

	info->read_status_mask = IRQ_RXOVER;
	if (I_INPCK(info->port.tty))
		info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
		info->read_status_mask |= MASK_BREAK;
	if (I_IGNPAR(info->port.tty))
		info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
	if (I_IGNBRK(info->port.tty)) {
		info->ignore_status_mask |= MASK_BREAK;
		/* If ignoring parity and break indicators, ignore
		 * overruns too. (For real raw support).
		 */
		if (I_IGNPAR(info->port.tty))
			info->ignore_status_mask |= MASK_OVERRUN;
	}

	/* apply the new settings to the hardware */
	program_hw(info);
}
/* copy counters to user space, or reset them if user_icount is NULL */
static int get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount)
{
	DBGINFO(("%s get_stats\n", info->device_name));

	if (!user_icount) {
		/* NULL pointer requests a reset of the counters */
		memset(&info->icount, 0, sizeof(info->icount));
		return 0;
	}

	if (copy_to_user(user_icount, &info->icount, sizeof(struct mgsl_icount)))
		return -EFAULT;
	return 0;
}
/* copy current device parameters to user space */
static int get_params(struct slgt_info *info, MGSL_PARAMS __user *user_params)
{
	DBGINFO(("%s get_params\n", info->device_name));
	return copy_to_user(user_params, &info->params, sizeof(MGSL_PARAMS)) ?
		-EFAULT : 0;
}
/* set device parameters from user space and reprogram the hardware */
static int set_params(struct slgt_info *info, MGSL_PARAMS __user *new_params)
{
	MGSL_PARAMS tmp_params;
	unsigned long irq_flags;

	DBGINFO(("%s set_params\n", info->device_name));

	if (copy_from_user(&tmp_params, new_params, sizeof(MGSL_PARAMS)))
		return -EFAULT;

	spin_lock_irqsave(&info->lock, irq_flags);
	if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) {
		/* pseudo mode: update the base clock frequency only */
		info->base_clock = tmp_params.clock_speed;
	} else {
		memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS));
	}
	spin_unlock_irqrestore(&info->lock, irq_flags);

	/* apply the new parameters to the hardware */
	program_hw(info);
	return 0;
}
/* copy current transmit idle mode to user space */
static int get_txidle(struct slgt_info *info, int __user *idle_mode)
{
	DBGINFO(("%s get_txidle=%d\n", info->device_name, info->idle_mode));
	return put_user(info->idle_mode, idle_mode) ? -EFAULT : 0;
}
/* set the transmit idle mode and program it in non-async modes */
static int set_txidle(struct slgt_info *info, int idle_mode)
{
	unsigned long irq_flags;

	DBGINFO(("%s set_txidle(%d)\n", info->device_name, idle_mode));

	spin_lock_irqsave(&info->lock, irq_flags);
	info->idle_mode = idle_mode;
	if (info->params.mode != MGSL_MODE_ASYNC)
		tx_set_idle(info);
	spin_unlock_irqrestore(&info->lock, irq_flags);
	return 0;
}
/* enable or disable the transmitter */
static int tx_enable(struct slgt_info *info, int enable)
{
	unsigned long irq_flags;

	DBGINFO(("%s tx_enable(%d)\n", info->device_name, enable));

	spin_lock_irqsave(&info->lock, irq_flags);
	/* only act when the requested state differs from the current one */
	if (enable && !info->tx_enabled)
		tx_start(info);
	else if (!enable && info->tx_enabled)
		tx_stop(info);
	spin_unlock_irqrestore(&info->lock, irq_flags);
	return 0;
}
/*
 * abort transmit HDLC frame
 */
static int tx_abort(struct slgt_info *info)
{
	unsigned long irq_flags;

	DBGINFO(("%s tx_abort\n", info->device_name));

	/* resetting the tx DMA channel aborts the frame in progress */
	spin_lock_irqsave(&info->lock, irq_flags);
	tdma_reset(info);
	spin_unlock_irqrestore(&info->lock, irq_flags);
	return 0;
}
/* enable/disable receiver and optionally set the DMA buffer fill level
 *
 * enable is an encoded value, see the field comments below
 */
static int rx_enable(struct slgt_info *info, int enable)
{
	unsigned long flags;
	unsigned int rbuf_fill_level;

	DBGINFO(("%s rx_enable(%08x)\n", info->device_name, enable));

	spin_lock_irqsave(&info->lock,flags);

	/*
	 * enable[31..16] = receive DMA buffer fill level
	 * 0 = noop (leave fill level unchanged)
	 * fill level must be multiple of 4 and <= buffer size
	 */

	rbuf_fill_level = ((unsigned int)enable) >> 16;
	if (rbuf_fill_level) {
		if ((rbuf_fill_level > DMABUFSIZE) || (rbuf_fill_level % 4)) {
			spin_unlock_irqrestore(&info->lock, flags);
			return -EINVAL;
		}
		info->rbuf_fill_level = rbuf_fill_level;
		/* small fill levels are serviced by PIO instead of DMA */
		if (rbuf_fill_level < 128)
			info->rx_pio = 1; /* PIO mode */
		else
			info->rx_pio = 0; /* DMA mode */
		rx_stop(info); /* restart receiver to use new fill level */
	}

	/*
	 * enable[1..0] = receiver enable command
	 * 0 = disable
	 * 1 = enable
	 * 2 = enable or force hunt mode if already enabled
	 */

	enable &= 3;
	if (enable) {
		if (!info->rx_enabled)
			rx_start(info);
		else if (enable == 2) {
			/* force hunt mode (write 1 to RCR[3]) */
			wr_reg16(info, RCR, rd_reg16(info, RCR) | BIT3);
		}
	} else {
		if (info->rx_enabled)
			rx_stop(info);
	}

	spin_unlock_irqrestore(&info->lock,flags);
	return 0;
}
/*
 * wait for specified event to occur
 *
 * mask_ptr  user pointer holding the event mask to wait for; on
 *           success the events that occurred are written back
 *
 * returns 0 on success, -EFAULT on bad user pointer, -ERESTARTSYS
 * if interrupted by a signal, -EIO if woken without a state change
 */
static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
{
	unsigned long flags;
	int s;
	int rc=0;
	struct mgsl_icount cprev, cnow;
	int events;
	int mask;
	struct _input_signal_events oldsigs, newsigs;
	DECLARE_WAITQUEUE(wait, current);

	if (get_user(mask, mask_ptr))
		return -EFAULT;

	DBGINFO(("%s wait_mgsl_event(%d)\n", info->device_name, mask));

	spin_lock_irqsave(&info->lock,flags);

	/* return immediately if state matches requested events */
	get_signals(info);
	s = info->signals;

	events = mask &
		( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
		  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
		  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
	if (events) {
		spin_unlock_irqrestore(&info->lock,flags);
		goto exit;
	}

	/* save current irq counts */
	cprev = info->icount;
	oldsigs = info->input_signal_events;

	/* enable hunt and idle irqs if needed */
	if (mask & (MgslEvent_ExitHuntMode+MgslEvent_IdleReceived)) {
		unsigned short val = rd_reg16(info, SCR);
		if (!(val & IRQ_RXIDLE))
			wr_reg16(info, SCR, (unsigned short)(val | IRQ_RXIDLE));
	}

	/* must go interruptible before joining the wait queue to avoid
	 * missing a wakeup between unlock and schedule */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&info->event_wait_q, &wait);

	spin_unlock_irqrestore(&info->lock,flags);

	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get current irq counts */
		spin_lock_irqsave(&info->lock,flags);
		cnow = info->icount;
		newsigs = info->input_signal_events;
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->lock,flags);

		/* if no change, wait aborted for some reason */
		if (newsigs.dsr_up   == oldsigs.dsr_up   &&
		    newsigs.dsr_down == oldsigs.dsr_down &&
		    newsigs.dcd_up   == oldsigs.dcd_up   &&
		    newsigs.dcd_down == oldsigs.dcd_down &&
		    newsigs.cts_up   == oldsigs.cts_up   &&
		    newsigs.cts_down == oldsigs.cts_down &&
		    newsigs.ri_up    == oldsigs.ri_up    &&
		    newsigs.ri_down  == oldsigs.ri_down  &&
		    cnow.exithunt    == cprev.exithunt   &&
		    cnow.rxidle      == cprev.rxidle) {
			rc = -EIO;
			break;
		}

		/* translate observed transitions to the caller's event mask */
		events = mask &
			( (newsigs.dsr_up   != oldsigs.dsr_up   ? MgslEvent_DsrActive:0)   +
			  (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
			  (newsigs.dcd_up   != oldsigs.dcd_up   ? MgslEvent_DcdActive:0)   +
			  (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
			  (newsigs.cts_up   != oldsigs.cts_up   ? MgslEvent_CtsActive:0)   +
			  (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
			  (newsigs.ri_up    != oldsigs.ri_up    ? MgslEvent_RiActive:0)    +
			  (newsigs.ri_down  != oldsigs.ri_down  ? MgslEvent_RiInactive:0)  +
			  (cnow.exithunt    != cprev.exithunt   ? MgslEvent_ExitHuntMode:0) +
			  (cnow.rxidle      != cprev.rxidle     ? MgslEvent_IdleReceived:0) );
		if (events)
			break;

		cprev = cnow;
		oldsigs = newsigs;
	}

	remove_wait_queue(&info->event_wait_q, &wait);
	set_current_state(TASK_RUNNING);


	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
		spin_lock_irqsave(&info->lock,flags);
		if (!waitqueue_active(&info->event_wait_q)) {
			/* disable enable exit hunt mode/idle rcvd IRQs */
			wr_reg16(info, SCR,
				(unsigned short)(rd_reg16(info, SCR) & ~IRQ_RXIDLE));
		}
		spin_unlock_irqrestore(&info->lock,flags);
	}
exit:
	if (rc == 0)
		rc = put_user(events, mask_ptr);
	return rc;
}
static int get_interface(struct slgt_info *info, int __user *if_mode)
{
	/* copy the current serial interface mode out to user space */
	DBGINFO(("%s get_interface=%x\n", info->device_name, info->if_mode));
	return put_user(info->if_mode, if_mode) ? -EFAULT : 0;
}
/*
 * set serial interface mode (MGSL_INTERFACE_* flags) and update the
 * RTS-driver-control bit in the transmit control register to match.
 * Always returns 0.
 */
static int set_interface(struct slgt_info *info, int if_mode)
{
	unsigned long flags;
	unsigned short val;

	DBGINFO(("%s set_interface=%x)\n", info->device_name, if_mode));
	spin_lock_irqsave(&info->lock,flags);
	info->if_mode = if_mode;

	/* apply the new mode to the interface selection hardware */
	msc_set_vcr(info);

	/* TCR (tx control) 07 1=RTS driver control */
	val = rd_reg16(info, TCR);
	if (info->if_mode & MGSL_INTERFACE_RTS_EN)
		val |= BIT7;
	else
		val &= ~BIT7;
	wr_reg16(info, TCR, val);
	spin_unlock_irqrestore(&info->lock,flags);
	return 0;
}
static int get_xsync(struct slgt_info *info, int __user *xsync)
{
	/* copy the cached extended sync pattern out to user space */
	DBGINFO(("%s get_xsync=%x\n", info->device_name, info->xsync));
	return put_user(info->xsync, xsync) ? -EFAULT : 0;
}
/*
* set extended sync pattern (1 to 4 bytes) for extended sync mode
*
* sync pattern is contained in least significant bytes of value
* most significant byte of sync pattern is oldest (1st sent/detected)
*/
static int set_xsync(struct slgt_info *info, int xsync)
{
	unsigned long flags;

	DBGINFO(("%s set_xsync=%x)\n", info->device_name, xsync));
	spin_lock_irqsave(&info->lock, flags);
	/* cache the pattern and program the extended sync register */
	info->xsync = xsync;
	wr_reg32(info, XSR, xsync);
	spin_unlock_irqrestore(&info->lock, flags);
	return 0;
}
static int get_xctrl(struct slgt_info *info, int __user *xctrl)
{
	/* copy the cached extended control value out to user space */
	DBGINFO(("%s get_xctrl=%x\n", info->device_name, info->xctrl));
	return put_user(info->xctrl, xctrl) ? -EFAULT : 0;
}
/*
* set extended control options
*
* xctrl[31:19] reserved, must be zero
* xctrl[18:17] extended sync pattern length in bytes
* 00 = 1 byte in xsr[7:0]
* 01 = 2 bytes in xsr[15:0]
* 10 = 3 bytes in xsr[23:0]
* 11 = 4 bytes in xsr[31:0]
* xctrl[16] 1 = enable terminal count, 0=disabled
* xctrl[15:0] receive terminal count for fixed length packets
* value is count minus one (0 = 1 byte packet)
* when terminal count is reached, receiver
* automatically returns to hunt mode and receive
* FIFO contents are flushed to DMA buffers with
* end of frame (EOF) status
*/
static int set_xctrl(struct slgt_info *info, int xctrl)
{
	unsigned long flags;

	DBGINFO(("%s set_xctrl=%x)\n", info->device_name, xctrl));
	spin_lock_irqsave(&info->lock, flags);
	/* cache the options and program the extended control register */
	info->xctrl = xctrl;
	wr_reg32(info, XCR, xctrl);
	spin_unlock_irqrestore(&info->lock, flags);
	return 0;
}
/*
* set general purpose IO pin state and direction
*
* user_gpio fields:
* state each bit indicates a pin state
* smask set bit indicates pin state to set
* dir each bit indicates a pin direction (0=input, 1=output)
* dmask set bit indicates pin direction to set
*/
static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
{
	unsigned long flags;
	struct gpio_desc gpio;
	__u32 data;

	if (!info->gpio_present)
		return -EINVAL;
	if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
		return -EFAULT;
	DBGINFO(("%s set_gpio state=%08x smask=%08x dir=%08x dmask=%08x\n",
		 info->device_name, gpio.state, gpio.smask,
		 gpio.dir, gpio.dmask));

	/* serialize GPIO register access on the first port's lock
	 * (presumably the GPIO block is adapter-wide — callers use
	 * port_array[0]->lock throughout this file) */
	spin_lock_irqsave(&info->port_array[0]->lock, flags);
	if (gpio.dmask) {
		/* read-modify-write direction bits selected by dmask */
		data = rd_reg32(info, IODR);
		data |= gpio.dmask & gpio.dir;
		data &= ~(gpio.dmask & ~gpio.dir);
		wr_reg32(info, IODR, data);
	}
	if (gpio.smask) {
		/* read-modify-write output values selected by smask */
		data = rd_reg32(info, IOVR);
		data |= gpio.smask & gpio.state;
		data &= ~(gpio.smask & ~gpio.state);
		wr_reg32(info, IOVR, data);
	}
	spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
	return 0;
}
/*
* get general purpose IO pin state and direction
*/
static int get_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
{
	struct gpio_desc gpio;

	if (!info->gpio_present)
		return -EINVAL;
	/* snapshot current pin values and directions; mark every bit
	 * valid via all-ones masks */
	gpio.state = rd_reg32(info, IOVR);
	gpio.smask = 0xffffffff;
	gpio.dir = rd_reg32(info, IODR);
	gpio.dmask = 0xffffffff;
	if (copy_to_user(user_gpio, &gpio, sizeof(gpio)))
		return -EFAULT;
	DBGINFO(("%s get_gpio state=%08x dir=%08x\n",
		 info->device_name, gpio.state, gpio.dir));
	return 0;
}
/*
* conditional wait facility
*/
/* initialize a conditional wait object bound to the current task */
static void init_cond_wait(struct cond_wait *w, unsigned int data)
{
	init_waitqueue_head(&w->q);
	init_waitqueue_entry(&w->wait, current);
	w->data = data;
}
/* queue the caller on w's wait queue and push w onto the singly
 * linked list at *head (callers in this file hold the port lock) */
static void add_cond_wait(struct cond_wait **head, struct cond_wait *w)
{
	/* go to sleep state before linking so a wakeup cannot be missed */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&w->q, &w->wait);
	w->next = *head;
	*head = w;
}
static void remove_cond_wait(struct cond_wait **head, struct cond_wait *cw)
{
	struct cond_wait **link;

	/* detach from the wait queue and return to the running state */
	remove_wait_queue(&cw->q, &cw->wait);
	set_current_state(TASK_RUNNING);

	/* unlink cw from the singly linked list with a
	 * pointer-to-pointer walk (no special case for the head) */
	for (link = head; *link != NULL; link = &(*link)->next) {
		if (*link == cw) {
			*link = cw->next;
			break;
		}
	}
}
static void flush_cond_wait(struct cond_wait **head)
{
	struct cond_wait *w;

	/* pop and wake each waiter until the list is empty */
	while ((w = *head) != NULL) {
		wake_up_interruptible(&w->q);
		*head = w->next;
	}
}
/*
* wait for general purpose I/O pin(s) to enter specified state
*
* user_gpio fields:
* state - bit indicates target pin state
* smask - set bit indicates watched pin
*
* The wait ends when at least one watched pin enters the specified
* state. When 0 (no error) is returned, user_gpio->state is set to the
* state of all GPIO pins when the wait ends.
*
* Note: Each pin may be a dedicated input, dedicated output, or
* configurable input/output. The number and configuration of pins
* varies with the specific adapter model. Only input pins (dedicated
* or configured) can be monitored with this function.
*/
static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
{
	unsigned long flags;
	int rc = 0;
	struct gpio_desc gpio;
	struct cond_wait wait;
	u32 state;

	if (!info->gpio_present)
		return -EINVAL;
	if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
		return -EFAULT;
	DBGINFO(("%s wait_gpio() state=%08x smask=%08x\n",
		 info->device_name, gpio.state, gpio.smask));
	/* ignore output pins identified by set IODR bit */
	if ((gpio.smask &= ~rd_reg32(info, IODR)) == 0)
		return -EINVAL;
	init_cond_wait(&wait, gpio.smask);

	spin_lock_irqsave(&info->port_array[0]->lock, flags);
	/* enable interrupts for watched pins */
	wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask);
	/* get current pin states */
	state = rd_reg32(info, IOVR);

	if (gpio.smask & ~(state ^ gpio.state)) {
		/* already in target state */
		gpio.state = state;
	} else {
		/* wait for target state: enqueue while still holding the
		 * lock, drop it across schedule(), then retake to dequeue */
		add_cond_wait(&info->gpio_wait_q, &wait);
		spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
		schedule();
		if (signal_pending(current))
			rc = -ERESTARTSYS;
		else
			gpio.state = wait.data;	/* pin states at wakeup */
		spin_lock_irqsave(&info->port_array[0]->lock, flags);
		remove_cond_wait(&info->gpio_wait_q, &wait);
	}

	/* disable all GPIO interrupts if no waiting processes */
	if (info->gpio_wait_q == NULL)
		wr_reg32(info, IOER, 0);
	spin_unlock_irqrestore(&info->port_array[0]->lock, flags);

	if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio)))
		rc = -EFAULT;
	return rc;
}
/*
 * sleep until one of the modem inputs selected by arg (TIOCM_RNG/DSR/
 * CD/CTS) changes state.  Returns 0 on a matching change, -ERESTARTSYS
 * on signal, -EIO if woken with no counter change (e.g. hangup).
 */
static int modem_input_wait(struct slgt_info *info,int arg)
{
	unsigned long flags;
	int rc;
	struct mgsl_icount cprev, cnow;
	DECLARE_WAITQUEUE(wait, current);

	/* save current irq counts */
	spin_lock_irqsave(&info->lock,flags);
	cprev = info->icount;
	add_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irqrestore(&info->lock,flags);

	for(;;) {
		schedule();
		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		/* get new irq counts; re-arm the sleep state while the
		 * lock is held so the next wakeup is not lost */
		spin_lock_irqsave(&info->lock,flags);
		cnow = info->icount;
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&info->lock,flags);

		/* if no change, wait aborted for some reason */
		if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
		    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
			rc = -EIO;
			break;
		}

		/* check for change in caller specified modem input */
		if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
		    (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
		    (arg & TIOCM_CD  && cnow.dcd != cprev.dcd) ||
		    (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
			rc = 0;
			break;
		}

		cprev = cnow;
	}
	remove_wait_queue(&info->status_event_wait_q, &wait);
	set_current_state(TASK_RUNNING);
	return rc;
}
/*
* return state of serial control and status signals
*/
/*
 * return state of serial control and status signals
 *
 * Samples the hardware lines under the device lock, then translates
 * the driver's SerialSignal_* bits into the tty layer's TIOCM_* bits.
 */
static int tiocmget(struct tty_struct *tty)
{
	struct slgt_info *info = tty->driver_data;
	unsigned int result;
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);
	get_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);

	/* compose independent flag bits with bitwise OR (was arithmetic
	 * '+'; same value since the bits are disjoint, but '|' is the
	 * correct idiom for flags) */
	result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS : 0) |
		 ((info->signals & SerialSignal_DTR) ? TIOCM_DTR : 0) |
		 ((info->signals & SerialSignal_DCD) ? TIOCM_CAR : 0) |
		 ((info->signals & SerialSignal_RI)  ? TIOCM_RNG : 0) |
		 ((info->signals & SerialSignal_DSR) ? TIOCM_DSR : 0) |
		 ((info->signals & SerialSignal_CTS) ? TIOCM_CTS : 0);

	DBGINFO(("%s tiocmget value=%08X\n", info->device_name, result));
	return result;
}
/*
* set modem control signals (DTR/RTS)
*
* cmd signal command: TIOCMBIS = set bit TIOCMBIC = clear bit
* TIOCMSET = set/clear signal values
* value bit mask for command
*/
static int tiocmset(struct tty_struct *tty,
		    unsigned int set, unsigned int clear)
{
	struct slgt_info *info = tty->driver_data;
	unsigned long flags;

	DBGINFO(("%s tiocmset(%x,%x)\n", info->device_name, set, clear));

	/* NOTE(review): info->signals is modified before the lock is
	 * taken; concurrent callers could interleave these updates —
	 * confirm whether this matches the locking used elsewhere */
	if (set & TIOCM_RTS)
		info->signals |= SerialSignal_RTS;
	if (set & TIOCM_DTR)
		info->signals |= SerialSignal_DTR;
	if (clear & TIOCM_RTS)
		info->signals &= ~SerialSignal_RTS;
	if (clear & TIOCM_DTR)
		info->signals &= ~SerialSignal_DTR;

	/* push the new DTR/RTS state to the hardware */
	spin_lock_irqsave(&info->lock,flags);
	set_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);
	return 0;
}
static int carrier_raised(struct tty_port *port)
{
	struct slgt_info *info = container_of(port, struct slgt_info, port);
	unsigned long flags;

	/* refresh the cached signal state from hardware, then report
	 * whether carrier (DCD) is asserted */
	spin_lock_irqsave(&info->lock, flags);
	get_signals(info);
	spin_unlock_irqrestore(&info->lock, flags);
	return !!(info->signals & SerialSignal_DCD);
}
/*
 * tty_port hook: assert (on != 0) or drop both DTR and RTS and push
 * the change to the hardware under the device lock.
 */
static void dtr_rts(struct tty_port *port, int on)
{
	unsigned long flags;
	struct slgt_info *info = container_of(port, struct slgt_info, port);

	spin_lock_irqsave(&info->lock,flags);
	/* combine flag bits with bitwise OR (was arithmetic '+'; same
	 * value for disjoint bits, but '|' is the correct flag idiom) */
	if (on)
		info->signals |= SerialSignal_RTS | SerialSignal_DTR;
	else
		info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
	set_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);
}
/*
* block current process until the device is ready to open
*/
static int block_til_ready(struct tty_struct *tty, struct file *filp,
			   struct slgt_info *info)
{
	DECLARE_WAITQUEUE(wait, current);
	int retval;
	bool do_clocal = false;
	bool extra_count = false;	/* true if we decremented port->count */
	unsigned long flags;
	int cd;
	struct tty_port *port = &info->port;

	DBGINFO(("%s block_til_ready\n", tty->driver->name));

	if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
		/* nonblock mode is set or port is not enabled */
		port->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}

	/* CLOCAL means the open may complete without carrier */
	if (tty->termios->c_cflag & CLOCAL)
		do_clocal = true;

	/* Wait for carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, port->count is dropped by one, so that
	 * close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */
	retval = 0;
	add_wait_queue(&port->open_wait, &wait);

	spin_lock_irqsave(&info->lock, flags);
	if (!tty_hung_up_p(filp)) {
		extra_count = true;
		port->count--;
	}
	spin_unlock_irqrestore(&info->lock, flags);
	port->blocked_open++;

	while (1) {
		/* raise DTR/RTS while waiting if a baud rate is set */
		if ((tty->termios->c_cflag & CBAUD))
			tty_port_raise_dtr_rts(port);

		/* sleep state must be set before the condition checks
		 * so a wakeup between check and schedule() is not lost */
		set_current_state(TASK_INTERRUPTIBLE);

		if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
			retval = (port->flags & ASYNC_HUP_NOTIFY) ?
					-EAGAIN : -ERESTARTSYS;
			break;
		}

		cd = tty_port_carrier_raised(port);

		if (!(port->flags & ASYNC_CLOSING) && (do_clocal || cd ))
			break;

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}

		DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
		/* drop the BTM across the sleep to avoid blocking others */
		tty_unlock();
		schedule();
		tty_lock();
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&port->open_wait, &wait);

	/* restore the reference taken away above */
	if (extra_count)
		port->count++;
	port->blocked_open--;

	if (!retval)
		port->flags |= ASYNC_NORMAL_ACTIVE;

	DBGINFO(("%s block_til_ready ready, rc=%d\n", tty->driver->name, retval));
	return retval;
}
static int alloc_tmp_rbuf(struct slgt_info *info)
{
	/* scratch buffer sized for a maximum frame plus 5 extra bytes
	 * (presumably room for status/CRC — confirm against rx path) */
	info->tmp_rbuf = kmalloc(info->max_frame_size + 5, GFP_KERNEL);
	return info->tmp_rbuf ? 0 : -ENOMEM;
}
/* free the temporary receive frame buffer */
static void free_tmp_rbuf(struct slgt_info *info)
{
	kfree(info->tmp_rbuf);
	info->tmp_rbuf = NULL;	/* make repeat calls harmless */
}
/*
* allocate DMA descriptor lists.
*/
static int alloc_desc(struct slgt_info *info)
{
	unsigned int i;
	unsigned int pbufs;

	/* allocate memory to hold descriptor lists */
	info->bufs = pci_alloc_consistent(info->pdev, DESC_LIST_SIZE, &info->bufs_dma_addr);
	if (info->bufs == NULL)
		return -ENOMEM;

	memset(info->bufs, 0, DESC_LIST_SIZE);

	/* rx descriptors first, tx descriptors directly after them */
	info->rbufs = (struct slgt_desc*)info->bufs;
	info->tbufs = ((struct slgt_desc*)info->bufs) + info->rbuf_count;

	/* NOTE(review): DMA address is truncated to 32 bits here, in
	 * line with the cpu_to_le32() use below — assumes the device
	 * only does 32-bit DMA; confirm against the hardware spec */
	pbufs = (unsigned int)info->bufs_dma_addr;

	/*
	 * Build circular lists of descriptors
	 */

	for (i=0; i < info->rbuf_count; i++) {
		/* physical address of this descriptor */
		info->rbufs[i].pdesc = pbufs + (i * sizeof(struct slgt_desc));

		/* physical address of next descriptor */
		if (i == info->rbuf_count - 1)
			info->rbufs[i].next = cpu_to_le32(pbufs);
		else
			info->rbufs[i].next = cpu_to_le32(pbufs + ((i+1) * sizeof(struct slgt_desc)));
		set_desc_count(info->rbufs[i], DMABUFSIZE);
	}

	for (i=0; i < info->tbuf_count; i++) {
		/* physical address of this descriptor */
		info->tbufs[i].pdesc = pbufs + ((info->rbuf_count + i) * sizeof(struct slgt_desc));

		/* physical address of next descriptor */
		if (i == info->tbuf_count - 1)
			info->tbufs[i].next = cpu_to_le32(pbufs + info->rbuf_count * sizeof(struct slgt_desc));
		else
			info->tbufs[i].next = cpu_to_le32(pbufs + ((info->rbuf_count + i + 1) * sizeof(struct slgt_desc)));
	}

	return 0;
}
/* free the coherent descriptor list block and clear all aliases into it */
static void free_desc(struct slgt_info *info)
{
	if (info->bufs != NULL) {
		pci_free_consistent(info->pdev, DESC_LIST_SIZE, info->bufs, info->bufs_dma_addr);
		info->bufs = NULL;
		/* rbufs/tbufs pointed into bufs; clear them too */
		info->rbufs = NULL;
		info->tbufs = NULL;
	}
}
static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
{
	int idx;

	/* give each descriptor its own coherent DMA data buffer; on
	 * failure the caller is expected to free what was allocated */
	for (idx = 0; idx < count; idx++) {
		bufs[idx].buf = pci_alloc_consistent(info->pdev, DMABUFSIZE,
						     &bufs[idx].buf_dma_addr);
		if (bufs[idx].buf == NULL)
			return -ENOMEM;
		bufs[idx].pbuf = cpu_to_le32((unsigned int)bufs[idx].buf_dma_addr);
	}
	return 0;
}
static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
{
	int idx;

	/* release every data buffer that was successfully allocated */
	for (idx = 0; idx < count; idx++) {
		if (bufs[idx].buf != NULL) {
			pci_free_consistent(info->pdev, DMABUFSIZE,
					    bufs[idx].buf, bufs[idx].buf_dma_addr);
			bufs[idx].buf = NULL;
		}
	}
}
/*
 * allocate all DMA resources for one port: descriptor lists, the
 * per-descriptor data buffers (32 rx + 32 tx), and the temporary
 * receive assembly buffer.  Returns 0 or -ENOMEM; partial allocations
 * are left in place for free_dma_bufs() to release.
 */
static int alloc_dma_bufs(struct slgt_info *info)
{
	info->rbuf_count = 32;
	info->tbuf_count = 32;

	if (alloc_desc(info) < 0 ||
	    alloc_bufs(info, info->rbufs, info->rbuf_count) < 0 ||
	    alloc_bufs(info, info->tbufs, info->tbuf_count) < 0 ||
	    alloc_tmp_rbuf(info) < 0) {
		DBGERR(("%s DMA buffer alloc fail\n", info->device_name));
		return -ENOMEM;
	}
	reset_rbufs(info);
	return 0;
}
/* release everything alloc_dma_bufs() acquired; safe on partial setup */
static void free_dma_bufs(struct slgt_info *info)
{
	if (info->bufs) {
		free_bufs(info, info->rbufs, info->rbuf_count);
		free_bufs(info, info->tbufs, info->tbuf_count);
		free_desc(info);
	}
	free_tmp_rbuf(info);
}
/*
 * reserve and map the device register region.  Returns 0 on success,
 * -ENODEV on failure (after releasing anything already claimed and
 * recording a diagnostic code in init_error).
 */
static int claim_resources(struct slgt_info *info)
{
	if (request_mem_region(info->phys_reg_addr, SLGT_REG_SIZE, "synclink_gt") == NULL) {
		DBGERR(("%s reg addr conflict, addr=%08X\n",
			info->device_name, info->phys_reg_addr));
		info->init_error = DiagStatus_AddressConflict;
		goto errout;
	}
	else
		info->reg_addr_requested = true;

	info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE);
	if (!info->reg_addr) {
		DBGERR(("%s can't map device registers, addr=%08X\n",
			info->device_name, info->phys_reg_addr));
		info->init_error = DiagStatus_CantAssignPciResources;
		goto errout;
	}
	return 0;

errout:
	/* undo the partial claim (checks each *_requested flag) */
	release_resources(info);
	return -ENODEV;
}
/* release IRQ, memory region, and register mapping; each step is
 * guarded so partial setups (and repeat calls) are safe */
static void release_resources(struct slgt_info *info)
{
	if (info->irq_requested) {
		free_irq(info->irq_level, info);
		info->irq_requested = false;
	}

	if (info->reg_addr_requested) {
		release_mem_region(info->phys_reg_addr, SLGT_REG_SIZE);
		info->reg_addr_requested = false;
	}

	if (info->reg_addr) {
		iounmap(info->reg_addr);
		info->reg_addr = NULL;
	}
}
/* Add the specified device instance data structure to the
* global linked list of devices and increment the device count.
*/
static void add_device(struct slgt_info *info)
{
	char *devstr;

	info->next_device = NULL;
	info->line = slgt_device_count;
	sprintf(info->device_name, "%s%d", tty_dev_prefix, info->line);

	/* apply per-line max_frame_size module parameter override */
	if (info->line < MAX_DEVICES) {
		if (maxframe[info->line])
			info->max_frame_size = maxframe[info->line];
	}

	slgt_device_count++;

	/* append to the global singly linked device list */
	if (!slgt_device_list)
		slgt_device_list = info;
	else {
		struct slgt_info *current_dev = slgt_device_list;
		while(current_dev->next_device)
			current_dev = current_dev->next_device;
		current_dev->next_device = info;
	}

	/* clamp frame size to the supported [4096, 65535] range */
	if (info->max_frame_size < 4096)
		info->max_frame_size = 4096;
	else if (info->max_frame_size > 65535)
		info->max_frame_size = 65535;

	switch(info->pdev->device) {
	case SYNCLINK_GT_DEVICE_ID:
		devstr = "GT";
		break;
	case SYNCLINK_GT2_DEVICE_ID:
		devstr = "GT2";
		break;
	case SYNCLINK_GT4_DEVICE_ID:
		devstr = "GT4";
		break;
	case SYNCLINK_AC_DEVICE_ID:
		devstr = "AC";
		/* AC model is async-only; force the default mode */
		info->params.mode = MGSL_MODE_ASYNC;
		break;
	default:
		devstr = "(unknown model)";
	}
	printk("SyncLink %s %s IO=%08x IRQ=%d MaxFrameSize=%u\n",
		devstr, info->device_name, info->phys_reg_addr,
		info->irq_level, info->max_frame_size);

#if SYNCLINK_GENERIC_HDLC
	hdlcdev_init(info);
#endif
}
/* tty_port callbacks: carrier detection and DTR/RTS control */
static const struct tty_port_operations slgt_port_ops = {
	.carrier_raised = carrier_raised,
	.dtr_rts = dtr_rts,
};
/*
* allocate device instance structure, return NULL on failure
*/
static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
{
	struct slgt_info *info;

	info = kzalloc(sizeof(struct slgt_info), GFP_KERNEL);

	if (!info) {
		DBGERR(("%s device alloc failed adapter=%d port=%d\n",
			driver_name, adapter_num, port_num));
	} else {
		tty_port_init(&info->port);
		info->port.ops = &slgt_port_ops;
		info->magic = MGSL_MAGIC;
		INIT_WORK(&info->task, bh_handler);
		info->max_frame_size = 4096;
		info->base_clock = 14745600;
		info->rbuf_fill_level = DMABUFSIZE;
		info->port.close_delay = 5*HZ/10;	/* 0.5 second */
		info->port.closing_wait = 30*HZ;
		init_waitqueue_head(&info->status_event_wait_q);
		init_waitqueue_head(&info->event_wait_q);
		spin_lock_init(&info->netlock);
		memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
		info->idle_mode = HDLC_TXIDLE_FLAGS;
		info->adapter_num = adapter_num;
		info->port_num = port_num;

		setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
		setup_timer(&info->rx_timer, rx_timeout, (unsigned long)info);

		/* Copy configuration info to device instance data */
		info->pdev = pdev;
		info->irq_level = pdev->irq;
		info->phys_reg_addr = pci_resource_start(pdev,0);

		info->bus_type = MGSL_BUS_TYPE_PCI;
		info->irq_flags = IRQF_SHARED;

		info->init_error = -1; /* assume error, set to 0 on successful init */
	}

	return info;
}
/*
 * instantiate and initialize all ports of one adapter: allocate per-
 * port instance data, claim shared adapter resources through port 0,
 * install the shared interrupt handler, and register the tty devices.
 */
static void device_init(int adapter_num, struct pci_dev *pdev)
{
	struct slgt_info *port_array[SLGT_MAX_PORTS];
	int i;
	int port_count = 1;

	if (pdev->device == SYNCLINK_GT2_DEVICE_ID)
		port_count = 2;
	else if (pdev->device == SYNCLINK_GT4_DEVICE_ID)
		port_count = 4;

	/* allocate device instances for all ports */
	for (i=0; i < port_count; ++i) {
		port_array[i] = alloc_dev(adapter_num, i, pdev);
		if (port_array[i] == NULL) {
			/* roll back the instances allocated so far */
			for (--i; i >= 0; --i)
				kfree(port_array[i]);
			return;
		}
	}

	/* give copy of port_array to all ports and add to device list */
	for (i=0; i < port_count; ++i) {
		memcpy(port_array[i]->port_array, port_array, sizeof(port_array));
		add_device(port_array[i]);
		port_array[i]->port_count = port_count;
		spin_lock_init(&port_array[i]->lock);
	}

	/* Allocate and claim adapter resources */
	if (!claim_resources(port_array[0])) {

		/* NOTE(review): alloc_dma_bufs() return values are
		 * ignored here; a failed allocation is only reported
		 * via its own DBGERR — confirm intended behavior */
		alloc_dma_bufs(port_array[0]);

		/* copy resource information from first port to others */
		for (i = 1; i < port_count; ++i) {
			port_array[i]->irq_level = port_array[0]->irq_level;
			port_array[i]->reg_addr  = port_array[0]->reg_addr;
			alloc_dma_bufs(port_array[i]);
		}

		if (request_irq(port_array[0]->irq_level,
					slgt_interrupt,
					port_array[0]->irq_flags,
					port_array[0]->device_name,
					port_array[0]) < 0) {
			DBGERR(("%s request_irq failed IRQ=%d\n",
				port_array[0]->device_name,
				port_array[0]->irq_level));
		} else {
			port_array[0]->irq_requested = true;
			adapter_test(port_array[0]);
			/* propagate test results to the sibling ports */
			for (i=1 ; i < port_count ; i++) {
				port_array[i]->init_error = port_array[0]->init_error;
				port_array[i]->gpio_present = port_array[0]->gpio_present;
			}
		}
	}

	for (i=0; i < port_count; ++i)
		tty_register_device(serial_driver, port_array[i]->line, &(port_array[i]->pdev->dev));
}
/* PCI probe: enable the device, enable bus mastering for DMA, and
 * set up all of its ports */
static int __devinit init_one(struct pci_dev *dev,
			      const struct pci_device_id *ent)
{
	if (pci_enable_device(dev)) {
		printk("error enabling pci device %p\n", dev);
		return -EIO;
	}
	pci_set_master(dev);
	/* NOTE(review): device_init() reports its own failures and
	 * returns void, so probe always succeeds past this point */
	device_init(slgt_device_count, dev);
	return 0;
}
/* PCI remove: intentionally empty — per-device teardown is deferred
 * to slgt_cleanup() at module unload */
static void __devexit remove_one(struct pci_dev *dev)
{
}
/* tty driver entry points for all SyncLink GT ports */
static const struct tty_operations ops = {
	.open = open,
	.close = close,
	.write = write,
	.put_char = put_char,
	.flush_chars = flush_chars,
	.write_room = write_room,
	.chars_in_buffer = chars_in_buffer,
	.flush_buffer = flush_buffer,
	.ioctl = ioctl,
	.compat_ioctl = slgt_compat_ioctl,
	.throttle = throttle,
	.unthrottle = unthrottle,
	.send_xchar = send_xchar,
	.break_ctl = set_break,
	.wait_until_sent = wait_until_sent,
	.set_termios = set_termios,
	.stop = tx_hold,
	.start = tx_release,
	.hangup = hangup,
	.tiocmget = tiocmget,
	.tiocmset = tiocmset,
	.get_icount = get_icount,
	.proc_fops = &synclink_gt_proc_fops,
};
/*
 * module teardown: unregister the tty driver, quiesce every port,
 * then free all per-device resources and the device list itself.
 * Also used as the error-unwind path of slgt_init(), so every step
 * tolerates partial initialization.
 */
static void slgt_cleanup(void)
{
	int rc;
	struct slgt_info *info;
	struct slgt_info *tmp;

	printk(KERN_INFO "unload %s\n", driver_name);

	if (serial_driver) {
		for (info=slgt_device_list ; info != NULL ; info=info->next_device)
			tty_unregister_device(serial_driver, info->line);
		if ((rc = tty_unregister_driver(serial_driver)))
			DBGERR(("tty_unregister_driver error=%d\n", rc));
		put_tty_driver(serial_driver);
	}

	/* reset devices */
	info = slgt_device_list;
	while(info) {
		reset_port(info);
		info = info->next_device;
	}

	/* release devices */
	info = slgt_device_list;
	while(info) {
#if SYNCLINK_GENERIC_HDLC
		hdlcdev_exit(info);
#endif
		/* free_dma_bufs() already frees tmp_rbuf, so the former
		 * extra free_tmp_rbuf() call here was redundant */
		free_dma_bufs(info);
		if (info->port_num == 0)
			release_resources(info);
		tmp = info;
		info = info->next_device;
		kfree(tmp);
	}

	if (pci_registered)
		pci_unregister_driver(&pci_driver);
}
/*
* Driver initialization entry point.
*/
static int __init slgt_init(void)
{
	int rc;

	printk(KERN_INFO "%s\n", driver_name);

	serial_driver = alloc_tty_driver(MAX_DEVICES);
	if (!serial_driver) {
		printk("%s can't allocate tty driver\n", driver_name);
		return -ENOMEM;
	}

	/* Initialize the tty_driver structure */

	serial_driver->driver_name = tty_driver_name;
	serial_driver->name = tty_dev_prefix;
	serial_driver->major = ttymajor;
	serial_driver->minor_start = 64;
	serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
	serial_driver->subtype = SERIAL_TYPE_NORMAL;
	serial_driver->init_termios = tty_std_termios;
	serial_driver->init_termios.c_cflag =
		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	serial_driver->init_termios.c_ispeed = 9600;
	serial_driver->init_termios.c_ospeed = 9600;
	/* devices are registered individually in device_init() */
	serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	tty_set_operations(serial_driver, &ops);
	if ((rc = tty_register_driver(serial_driver)) < 0) {
		DBGERR(("%s can't register serial driver\n", driver_name));
		put_tty_driver(serial_driver);
		serial_driver = NULL;	/* keep slgt_cleanup() from double-freeing */
		goto error;
	}

	printk(KERN_INFO "%s, tty major#%d\n",
	       driver_name, serial_driver->major);

	slgt_device_count = 0;
	/* probing in pci_register_driver() populates slgt_device_list */
	if ((rc = pci_register_driver(&pci_driver)) < 0) {
		printk("%s pci_register_driver error=%d\n", driver_name, rc);
		goto error;
	}
	pci_registered = true;

	if (!slgt_device_list)
		printk("%s no devices found\n",driver_name);

	return 0;

error:
	slgt_cleanup();
	return rc;
}
/* module exit: delegate all teardown to slgt_cleanup() */
static void __exit slgt_exit(void)
{
	slgt_cleanup();
}
module_init(slgt_init);
module_exit(slgt_exit);
/*
* register access routines
*/
/*
 * Compute the mapped address of register offset 'addr' for this port:
 * offsets below 0x40 are adapter-global, 0x40..0x7f index a 16-byte
 * per-port block, and 0x80 and above index a 32-byte per-port block.
 * Expands to a declaration of 'reg_addr', so it must be the first
 * statement of the calling function.
 */
#define CALC_REGADDR() \
	unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \
	if (addr >= 0x80) \
		reg_addr += (info->port_num) * 32; \
	else if (addr >= 0x40)	\
		reg_addr += (info->port_num) * 16;
/* read an 8-bit register at port-relative offset 'addr' */
static __u8 rd_reg8(struct slgt_info *info, unsigned int addr)
{
	CALC_REGADDR();
	return readb((void __iomem *)reg_addr);
}
/* write an 8-bit register at port-relative offset 'addr' */
static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value)
{
	CALC_REGADDR();
	writeb(value, (void __iomem *)reg_addr);
}
/* read a 16-bit register at port-relative offset 'addr' */
static __u16 rd_reg16(struct slgt_info *info, unsigned int addr)
{
	CALC_REGADDR();
	return readw((void __iomem *)reg_addr);
}
/* write a 16-bit register at port-relative offset 'addr' */
static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value)
{
	CALC_REGADDR();
	writew(value, (void __iomem *)reg_addr);
}
/* read a 32-bit register at port-relative offset 'addr' */
static __u32 rd_reg32(struct slgt_info *info, unsigned int addr)
{
	CALC_REGADDR();
	return readl((void __iomem *)reg_addr);
}
/* write a 32-bit register at port-relative offset 'addr' */
static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value)
{
	CALC_REGADDR();
	writel(value, (void __iomem *)reg_addr);
}
/* reset the receive DMA controller; polls (bounded, no delay) for the
 * enable bit to drop and gives up silently after 1000 reads */
static void rdma_reset(struct slgt_info *info)
{
	unsigned int i;

	/* set reset bit */
	wr_reg32(info, RDCSR, BIT1);

	/* wait for enable bit cleared */
	for(i=0 ; i < 1000 ; i++)
		if (!(rd_reg32(info, RDCSR) & BIT0))
			break;
}
/* reset the transmit DMA controller; polls (bounded, no delay) for the
 * enable bit to drop and gives up silently after 1000 reads */
static void tdma_reset(struct slgt_info *info)
{
	unsigned int i;

	/* set reset bit */
	wr_reg32(info, TDCSR, BIT1);

	/* wait for enable bit cleared */
	for(i=0 ; i < 1000 ; i++)
		if (!(rd_reg32(info, TDCSR) & BIT0))
			break;
}
/*
* enable internal loopback
* TxCLK and RxCLK are generated from BRG
* and TxD is looped back to RxD internally.
*/
static void enable_loopback(struct slgt_info *info)
{
	/* SCR (serial control) BIT2=loopback enable */
	wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT2));

	if (info->params.mode != MGSL_MODE_ASYNC) {
		/* CCR (clock control)
		 * 07..05  tx clock source (010 = BRG)
		 * 04..02  rx clock source (010 = BRG)
		 * 01      auxclk enable (0 = disable)
		 * 00      BRG enable (1 = enable)
		 *
		 * 0100 1001
		 */
		wr_reg8(info, CCR, 0x49);

		/* set speed if available, otherwise use default */
		if (info->params.clock_speed)
			set_rate(info, info->params.clock_speed);
		else
			set_rate(info, 3686400);	/* fallback BRG rate */
	}
}
/*
* set baud rate generator to specified rate
*/
static void set_rate(struct slgt_info *info, u32 rate)
{
	unsigned int div;
	unsigned int osc = info->base_clock;

	/* div = osc/rate - 1
	 *
	 * Round div up if osc/rate is not integer to
	 * force to next slowest rate.
	 */

	if (rate) {
		div = osc/rate;
		/* exact division: step div down so the generated rate is
		 * exactly osc/(div+1) == rate (presumably BDR divides by
		 * div+1 — consistent with the formula above) */
		if (!(osc % rate) && div)
			div--;
		wr_reg16(info, BDR, (unsigned short)div);
	}
}
/* stop the receiver: disable/reset the rx channel, mask rx interrupts,
 * clear pending rx status, and reset rx DMA */
static void rx_stop(struct slgt_info *info)
{
	unsigned short val;

	/* disable and reset receiver */
	val = rd_reg16(info, RCR) & ~BIT1;          /* clear enable bit */
	wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
	wr_reg16(info, RCR, val);                  /* clear reset bit */

	slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA + IRQ_RXIDLE);

	/* clear pending rx interrupts */
	wr_reg16(info, SSR, IRQ_RXIDLE + IRQ_RXOVER);

	rdma_reset(info);

	info->rx_enabled = false;
	info->rx_restart = false;
}
/* (re)start the receiver in either PIO or DMA mode, depending on
 * info->rx_pio, and enable the relevant rx interrupts */
static void rx_start(struct slgt_info *info)
{
	unsigned short val;

	slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA);

	/* clear pending rx overrun IRQ */
	wr_reg16(info, SSR, IRQ_RXOVER);

	/* reset and disable receiver */
	val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */
	wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
	wr_reg16(info, RCR, val);                  /* clear reset bit */

	rdma_reset(info);
	reset_rbufs(info);

	if (info->rx_pio) {
		/* rx request when rx FIFO not empty */
		wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) & ~BIT14));
		slgt_irq_on(info, IRQ_RXDATA);
		if (info->params.mode == MGSL_MODE_ASYNC) {
			/* enable saving of rx status */
			wr_reg32(info, RDCSR, BIT6);
		}
	} else {
		/* rx request when rx FIFO half full */
		wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT14));
		/* set 1st descriptor address */
		wr_reg32(info, RDDAR, info->rbufs[0].pdesc);

		if (info->params.mode != MGSL_MODE_ASYNC) {
			/* enable rx DMA and DMA interrupt */
			wr_reg32(info, RDCSR, (BIT2 + BIT0));
		} else {
			/* enable saving of rx status, rx DMA and DMA interrupt */
			wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0));
		}
	}

	slgt_irq_on(info, IRQ_RXOVER);

	/* enable receiver */
	wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | BIT1));

	info->rx_restart = false;
	info->rx_enabled = true;
}
/* enable the transmitter and, if data is queued in the tx descriptor
 * ring, start tx DMA (raising RTS first if HDLC auto-RTS is enabled) */
static void tx_start(struct slgt_info *info)
{
	if (!info->tx_enabled) {
		/* set enable, clear reset */
		wr_reg16(info, TCR,
			 (unsigned short)((rd_reg16(info, TCR) | BIT1) & ~BIT2));
		info->tx_enabled = true;
	}

	if (desc_count(info->tbufs[info->tbuf_start])) {
		info->drop_rts_on_tx_done = false;

		if (info->params.mode != MGSL_MODE_ASYNC) {
			if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
				get_signals(info);
				if (!(info->signals & SerialSignal_RTS)) {
					/* raise RTS for this frame and
					 * remember to drop it afterwards */
					info->signals |= SerialSignal_RTS;
					set_signals(info);
					info->drop_rts_on_tx_done = true;
				}
			}

			slgt_irq_off(info, IRQ_TXDATA);
			slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE);
			/* clear tx idle and underrun status bits */
			wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
		} else {
			slgt_irq_off(info, IRQ_TXDATA);
			slgt_irq_on(info, IRQ_TXIDLE);
			/* clear tx idle status bit */
			wr_reg16(info, SSR, IRQ_TXIDLE);
		}
		/* set 1st descriptor address and start DMA */
		wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
		wr_reg32(info, TDCSR, BIT2 + BIT0);
		info->tx_active = true;
	}
}
/* stop the transmitter: cancel the tx timeout timer, reset tx DMA,
 * reset/disable the tx channel, mask tx interrupts, and reset the
 * tx descriptor ring */
static void tx_stop(struct slgt_info *info)
{
	unsigned short val;

	del_timer(&info->tx_timer);

	tdma_reset(info);

	/* reset and disable transmitter */
	val = rd_reg16(info, TCR) & ~BIT1;          /* clear enable bit */
	wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */

	slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);

	/* clear tx idle and underrun status bit */
	wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));

	reset_tbufs(info);

	info->tx_enabled = false;
	info->tx_active = false;
}
/*
 * quiesce one port: stop transmit and receive paths, drop the DTR and
 * RTS outputs, and mask all interrupt sources.  A no-op until the
 * registers have been mapped.
 */
static void reset_port(struct slgt_info *info)
{
	if (!info->reg_addr)
		return;

	tx_stop(info);
	rx_stop(info);

	/* combine flag bits with bitwise OR (was arithmetic '+'; same
	 * value for disjoint bits, but '|' is the correct flag idiom) */
	info->signals &= ~(SerialSignal_DTR | SerialSignal_RTS);
	set_signals(info);

	slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
}
static void reset_adapter(struct slgt_info *info)
{
	int port;

	/* quiesce every port on this adapter, in order */
	for (port = 0; port < info->port_count; ++port) {
		struct slgt_info *p = info->port_array[port];

		if (p)
			reset_port(p);
	}
}
/*
 * program the hardware for asynchronous operation from the current
 * info->params / info->if_mode settings: stop both data paths, then
 * configure TCR, RCR, CCR, the sampling rate, and SCR, finally
 * enabling break/overrun interrupts (and loopback if requested).
 */
static void async_mode(struct slgt_info *info)
{
  	unsigned short val;

	slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
	tx_stop(info);
	rx_stop(info);

	/* TCR (tx control)
	 *
	 * 15..13  mode, 010=async
	 * 12..10  encoding, 000=NRZ
	 * 09      parity enable
	 * 08      1=odd parity, 0=even parity
	 * 07      1=RTS driver control
	 * 06      1=break enable
	 * 05..04  character length
	 *         00=5 bits
	 *         01=6 bits
	 *         10=7 bits
	 *         11=8 bits
	 * 03      0=1 stop bit, 1=2 stop bits
	 * 02      reset
	 * 01      enable
	 * 00      auto-CTS enable
	 */
	val = 0x4000;	/* async mode bits only */

	if (info->if_mode & MGSL_INTERFACE_RTS_EN)
		val |= BIT7;

	if (info->params.parity != ASYNC_PARITY_NONE) {
		val |= BIT9;
		if (info->params.parity == ASYNC_PARITY_ODD)
			val |= BIT8;
	}

	switch (info->params.data_bits)
	{
	case 6: val |= BIT4; break;
	case 7: val |= BIT5; break;
	case 8: val |= BIT5 + BIT4; break;
	}

	if (info->params.stop_bits != 1)
		val |= BIT3;

	if (info->params.flags & HDLC_FLAG_AUTO_CTS)
		val |= BIT0;

	wr_reg16(info, TCR, val);

	/* RCR (rx control)
	 *
	 * 15..13  mode, 010=async
	 * 12..10  encoding, 000=NRZ
	 * 09      parity enable
	 * 08      1=odd parity, 0=even parity
	 * 07..06  reserved, must be 0
	 * 05..04  character length
	 *         00=5 bits
	 *         01=6 bits
	 *         10=7 bits
	 *         11=8 bits
	 * 03      reserved, must be zero
	 * 02      reset
	 * 01      enable
	 * 00      auto-DCD enable
	 */
	val = 0x4000;	/* async mode bits only */

	if (info->params.parity != ASYNC_PARITY_NONE) {
		val |= BIT9;
		if (info->params.parity == ASYNC_PARITY_ODD)
			val |= BIT8;
	}

	switch (info->params.data_bits)
	{
	case 6: val |= BIT4; break;
	case 7: val |= BIT5; break;
	case 8: val |= BIT5 + BIT4; break;
	}

	if (info->params.flags & HDLC_FLAG_AUTO_DCD)
		val |= BIT0;

	wr_reg16(info, RCR, val);

	/* CCR (clock control)
	 *
	 * 07..05  011 = tx clock source is BRG/16
	 * 04..02  010 = rx clock source is BRG
	 * 01      0 = auxclk disabled
	 * 00      1 = BRG enabled
	 *
	 * 0110 1001
	 */
	wr_reg8(info, CCR, 0x69);

	msc_set_vcr(info);

	/* SCR (serial control)
	 *
	 * 15  1=tx req on FIFO half empty
	 * 14  1=rx req on FIFO half full
	 * 13  tx data  IRQ enable
	 * 12  tx idle  IRQ enable
	 * 11  rx break on IRQ enable
	 * 10  rx data  IRQ enable
	 * 09  rx break off IRQ enable
	 * 08  overrun  IRQ enable
	 * 07  DSR      IRQ enable
	 * 06  CTS      IRQ enable
	 * 05  DCD      IRQ enable
	 * 04  RI       IRQ enable
	 * 03  0=16x sampling, 1=8x sampling
	 * 02  1=txd->rxd internal loopback enable
	 * 01  reserved, must be zero
	 * 00  1=master IRQ enable
	 */
	val = BIT15 + BIT14 + BIT0;
	/* JCR[8] : 1 = x8 async mode feature available */
	if ((rd_reg32(info, JCR) & BIT8) && info->params.data_rate &&
	    ((info->base_clock < (info->params.data_rate * 16)) ||
	     (info->base_clock % (info->params.data_rate * 16)))) {
		/* use 8x sampling when 16x would be impossible or inexact */
		val |= BIT3;
		set_rate(info, info->params.data_rate * 8);
	} else {
		/* use 16x sampling */
		set_rate(info, info->params.data_rate * 16);
	}
	wr_reg16(info, SCR, val);

	slgt_irq_on(info, IRQ_RXBREAK | IRQ_RXOVER);

	if (info->params.loopback)
		enable_loopback(info);
}
/*
 * program serial controller for synchronous operation
 * (HDLC/SDLC, raw bit, monosync, bisync, xsync) per info->params
 */
static void sync_mode(struct slgt_info *info)
{
        unsigned short val;

        /* quiesce the device before reprogramming */
        slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
        tx_stop(info);
        rx_stop(info);

        /* TCR (tx control)
         *
         * 15..13  mode
         *         000=HDLC/SDLC
         *         001=raw bit synchronous
         *         010=asynchronous/isochronous
         *         011=monosync byte synchronous
         *         100=bisync byte synchronous
         *         101=xsync byte synchronous
         * 12..10  encoding
         * 09      CRC enable
         * 08      CRC32
         * 07      1=RTS driver control
         * 06      preamble enable
         * 05..04  preamble length
         * 03      share open/close flag
         * 02      reset
         * 01      enable
         * 00      auto-CTS enable
         */
        val = BIT2;
        switch(info->params.mode) {
        case MGSL_MODE_XSYNC:
                val |= BIT15 + BIT13;
                break;
        case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
        case MGSL_MODE_BISYNC:   val |= BIT15; break;
        case MGSL_MODE_RAW:      val |= BIT13; break;
        }
        /* no case matched: mode bits stay 000 = HDLC/SDLC */
        if (info->if_mode & MGSL_INTERFACE_RTS_EN)
                val |= BIT7;
        switch(info->params.encoding)
        {
        case HDLC_ENCODING_NRZB:               val |= BIT10; break;
        case HDLC_ENCODING_NRZI_MARK:          val |= BIT11; break;
        case HDLC_ENCODING_NRZI:               val |= BIT11 + BIT10; break;
        case HDLC_ENCODING_BIPHASE_MARK:       val |= BIT12; break;
        case HDLC_ENCODING_BIPHASE_SPACE:      val |= BIT12 + BIT10; break;
        case HDLC_ENCODING_BIPHASE_LEVEL:      val |= BIT12 + BIT11; break;
        case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
        }
        switch (info->params.crc_type & HDLC_CRC_MASK)
        {
        case HDLC_CRC_16_CCITT: val |= BIT9; break;
        case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
        }
        if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE)
                val |= BIT6;
        switch (info->params.preamble_length)
        {
        case HDLC_PREAMBLE_LENGTH_16BITS: val |= BIT5; break;
        case HDLC_PREAMBLE_LENGTH_32BITS: val |= BIT4; break;
        case HDLC_PREAMBLE_LENGTH_64BITS: val |= BIT5 + BIT4; break;
        }
        if (info->params.flags & HDLC_FLAG_AUTO_CTS)
                val |= BIT0;
        wr_reg16(info, TCR, val);

        /* TPR (transmit preamble) */
        switch (info->params.preamble)
        {
        case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break;
        case HDLC_PREAMBLE_PATTERN_ONES:  val = 0xff; break;
        case HDLC_PREAMBLE_PATTERN_ZEROS: val = 0x00; break;
        case HDLC_PREAMBLE_PATTERN_10:    val = 0x55; break;
        case HDLC_PREAMBLE_PATTERN_01:    val = 0xaa; break;
        default:                          val = 0x7e; break;
        }
        wr_reg8(info, TPR, (unsigned char)val);

        /* RCR (rx control)
         *
         * 15..13  mode
         *         000=HDLC/SDLC
         *         001=raw bit synchronous
         *         010=asynchronous/isochronous
         *         011=monosync byte synchronous
         *         100=bisync byte synchronous
         *         101=xsync byte synchronous
         * 12..10  encoding
         * 09      CRC enable
         * 08      CRC32
         * 07..03  reserved, must be 0
         * 02      reset
         * 01      enable
         * 00      auto-DCD enable
         */
        val = 0;
        switch(info->params.mode) {
        case MGSL_MODE_XSYNC:
                val |= BIT15 + BIT13;
                break;
        case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
        case MGSL_MODE_BISYNC:   val |= BIT15; break;
        case MGSL_MODE_RAW:      val |= BIT13; break;
        }
        switch(info->params.encoding)
        {
        case HDLC_ENCODING_NRZB:               val |= BIT10; break;
        case HDLC_ENCODING_NRZI_MARK:          val |= BIT11; break;
        case HDLC_ENCODING_NRZI:               val |= BIT11 + BIT10; break;
        case HDLC_ENCODING_BIPHASE_MARK:       val |= BIT12; break;
        case HDLC_ENCODING_BIPHASE_SPACE:      val |= BIT12 + BIT10; break;
        case HDLC_ENCODING_BIPHASE_LEVEL:      val |= BIT12 + BIT11; break;
        case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
        }
        switch (info->params.crc_type & HDLC_CRC_MASK)
        {
        case HDLC_CRC_16_CCITT: val |= BIT9; break;
        case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
        }
        if (info->params.flags & HDLC_FLAG_AUTO_DCD)
                val |= BIT0;
        wr_reg16(info, RCR, val);

        /* CCR (clock control)
         *
         * 07..05  tx clock source
         * 04..02  rx clock source
         * 01      auxclk enable
         * 00      BRG enable
         */
        val = 0;
        if (info->params.flags & HDLC_FLAG_TXC_BRG)
        {
                // when RxC source is DPLL, BRG generates 16X DPLL
                // reference clock, so take TxC from BRG/16 to get
                // transmit clock at actual data rate
                if (info->params.flags & HDLC_FLAG_RXC_DPLL)
                        val |= BIT6 + BIT5;     /* 011, txclk = BRG/16 */
                else
                        val |= BIT6;    /* 010, txclk = BRG */
        }
        else if (info->params.flags & HDLC_FLAG_TXC_DPLL)
                val |= BIT7;    /* 100, txclk = DPLL Input */
        else if (info->params.flags & HDLC_FLAG_TXC_RXCPIN)
                val |= BIT5;    /* 001, txclk = RXC Input */
        if (info->params.flags & HDLC_FLAG_RXC_BRG)
                val |= BIT3;    /* 010, rxclk = BRG */
        else if (info->params.flags & HDLC_FLAG_RXC_DPLL)
                val |= BIT4;    /* 100, rxclk = DPLL */
        else if (info->params.flags & HDLC_FLAG_RXC_TXCPIN)
                val |= BIT2;    /* 001, rxclk = TXC Input */
        if (info->params.clock_speed)
                val |= BIT1 + BIT0;     /* enable auxclk output and BRG */
        wr_reg8(info, CCR, (unsigned char)val);

        if (info->params.flags & (HDLC_FLAG_TXC_DPLL + HDLC_FLAG_RXC_DPLL))
        {
                // program DPLL mode
                switch(info->params.encoding)
                {
                case HDLC_ENCODING_BIPHASE_MARK:
                case HDLC_ENCODING_BIPHASE_SPACE:
                        val = BIT7; break;
                case HDLC_ENCODING_BIPHASE_LEVEL:
                case HDLC_ENCODING_DIFF_BIPHASE_LEVEL:
                        val = BIT7 + BIT6; break;
                default: val = BIT6;    // NRZ encodings
                }
                wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | val));

                // DPLL requires a 16X reference clock from BRG
                set_rate(info, info->params.clock_speed * 16);
        }
        else
                set_rate(info, info->params.clock_speed);

        tx_set_idle(info);

        msc_set_vcr(info);

        /* SCR (serial control)
         *
         * 15  1=tx req on FIFO half empty
         * 14  1=rx req on FIFO half full
         * 13  tx data IRQ enable
         * 12  tx idle IRQ enable
         * 11  underrun IRQ enable
         * 10  rx data IRQ enable
         * 09  rx idle IRQ enable
         * 08  overrun IRQ enable
         * 07  DSR IRQ enable
         * 06  CTS IRQ enable
         * 05  DCD IRQ enable
         * 04  RI IRQ enable
         * 03  reserved, must be zero
         * 02  1=txd->rxd internal loopback enable
         * 01  reserved, must be zero
         * 00  1=master IRQ enable
         */
        wr_reg16(info, SCR, BIT15 + BIT14 + BIT0);

        if (info->params.loopback)
                enable_loopback(info);
}
/*
 * set transmit idle mode
 *
 * Programs the pattern transmitted between frames from info->idle_mode.
 * For a custom 16-bit idle the MSB is held in TPR (reusing the preamble
 * register, which is why preamble is disabled) and the LSB in TIR.
 */
static void tx_set_idle(struct slgt_info *info)
{
        unsigned char val;
        unsigned short tcr;

        /* if preamble enabled (tcr[6] == 1) then tx idle size = 8 bits
         * else tcr[5:4] = tx idle size: 00 = 8 bits, 01 = 16 bits
         */
        tcr = rd_reg16(info, TCR);
        if (info->idle_mode & HDLC_TXIDLE_CUSTOM_16) {
                /* disable preamble, set idle size to 16 bits */
                tcr = (tcr & ~(BIT6 + BIT5)) | BIT4;
                /* MSB of 16 bit idle specified in tx preamble register (TPR) */
                wr_reg8(info, TPR, (unsigned char)((info->idle_mode >> 8) & 0xff));
        } else if (!(tcr & BIT6)) {
                /* preamble is disabled, set idle size to 8 bits */
                tcr &= ~(BIT5 + BIT4);
        }
        wr_reg16(info, TCR, tcr);

        if (info->idle_mode & (HDLC_TXIDLE_CUSTOM_8 | HDLC_TXIDLE_CUSTOM_16)) {
                /* LSB of custom tx idle specified in tx idle register */
                val = (unsigned char)(info->idle_mode & 0xff);
        } else {
                /* standard 8 bit idle patterns */
                switch(info->idle_mode)
                {
                case HDLC_TXIDLE_FLAGS:          val = 0x7e; break;
                case HDLC_TXIDLE_ALT_ZEROS_ONES:
                case HDLC_TXIDLE_ALT_MARK_SPACE: val = 0xaa; break;
                case HDLC_TXIDLE_ZEROS:
                case HDLC_TXIDLE_SPACE:          val = 0x00; break;
                default:                         val = 0xff; /* all ones */
                }
        }
        wr_reg8(info, TIR, val);
}
/*
 * sample the V.24 status (input) signals from the SSR register and
 * merge them into info->signals, keeping the locally driven outputs
 * (DTR/RTS) untouched
 */
static void get_signals(struct slgt_info *info)
{
        unsigned short status = rd_reg16(info, SSR);
        unsigned char sigs;

        /* keep only the signals we drive, then fold in the inputs */
        sigs = info->signals & (SerialSignal_DTR + SerialSignal_RTS);

        if (status & BIT3)
                sigs |= SerialSignal_DSR;
        if (status & BIT2)
                sigs |= SerialSignal_CTS;
        if (status & BIT1)
                sigs |= SerialSignal_DCD;
        if (status & BIT0)
                sigs |= SerialSignal_RI;

        info->signals = sigs;
}
/*
 * set V.24 Control Register based on current configuration
 *
 * Encodes the selected serial interface type, bit order, and the
 * driver-controlled modem/output signals into the VCR register.
 */
static void msc_set_vcr(struct slgt_info *info)
{
        unsigned char val = 0;

        /* VCR (V.24 control)
         *
         * 07..04  serial IF select
         * 03      DTR
         * 02      RTS
         * 01      LL  (local loopback request)
         * 00      RL  (remote loopback request)
         */
        switch(info->if_mode & MGSL_INTERFACE_MASK)
        {
        case MGSL_INTERFACE_RS232:
                val |= BIT5; /* 0010 */
                break;
        case MGSL_INTERFACE_V35:
                val |= BIT7 + BIT6 + BIT5; /* 1110 */
                break;
        case MGSL_INTERFACE_RS422:
                val |= BIT6; /* 0100 */
                break;
        }
        /* no case matched: interface select stays 0000 (disabled) */
        if (info->if_mode & MGSL_INTERFACE_MSB_FIRST)
                val |= BIT4;
        if (info->signals & SerialSignal_DTR)
                val |= BIT3;
        if (info->signals & SerialSignal_RTS)
                val |= BIT2;
        if (info->if_mode & MGSL_INTERFACE_LL)
                val |= BIT1;
        if (info->if_mode & MGSL_INTERFACE_RL)
                val |= BIT0;
        wr_reg8(info, VCR, val);
}
/*
 * push the driver-controlled output signals (DTR, RTS) from
 * info->signals out to the V.24 control register, leaving all
 * other VCR bits as they are
 */
static void set_signals(struct slgt_info *info)
{
        unsigned char vcr = rd_reg8(info, VCR);

        /* VCR[3] = DTR, VCR[2] = RTS */
        vcr = (info->signals & SerialSignal_DTR) ?
                (vcr | BIT3) : (vcr & ~BIT3);
        vcr = (info->signals & SerialSignal_RTS) ?
                (vcr | BIT2) : (vcr & ~BIT2);

        wr_reg8(info, VCR, vcr);
}
/*
 * free range of receive DMA buffers (i to last, inclusive, with
 * ring wraparound) and advance rbuf_current past the freed range
 */
static void free_rbufs(struct slgt_info *info, unsigned int i, unsigned int last)
{
        for (;;) {
                unsigned int was_last = (i == last);

                /* reset current buffer for reuse */
                info->rbufs[i].status = 0;
                set_desc_count(info->rbufs[i], info->rbuf_fill_level);

                /* advance with wraparound, then stop after the last one */
                if (++i == info->rbuf_count)
                        i = 0;
                if (was_last)
                        break;
        }
        info->rbuf_current = i;
}
/*
 * mark every receive DMA buffer as free and reset the fill tracking
 */
static void reset_rbufs(struct slgt_info *info)
{
        /* recycle the whole ring: entry 0 through the final entry */
        free_rbufs(info, 0, info->rbuf_count - 1);
        info->rbuf_fill_count = 0;
        info->rbuf_fill_index = 0;
}
/*
 * pass receive HDLC frame to upper layer
 *
 * Walks the receive DMA buffer ring from rbuf_current looking for a
 * complete frame (sequence of completed descriptors ending in EOF).
 * Frames failing the address filter or error checks are discarded and
 * the scan restarts. A good frame is copied into the contiguous
 * tmp_rbuf and handed to the network layer or line discipline.
 *
 * return true if frame available, otherwise false
 */
static bool rx_get_frame(struct slgt_info *info)
{
        unsigned int start, end;
        unsigned short status;
        unsigned int framesize = 0;
        unsigned long flags;
        struct tty_struct *tty = info->port.tty;
        unsigned char addr_field = 0xff;
        unsigned int crc_size = 0;

        switch (info->params.crc_type & HDLC_CRC_MASK) {
        case HDLC_CRC_16_CCITT: crc_size = 2; break;
        case HDLC_CRC_32_CCITT: crc_size = 4; break;
        }

check_again:

        framesize = 0;
        addr_field = 0xff;
        start = end = info->rbuf_current;

        for (;;) {
                if (!desc_complete(info->rbufs[end]))
                        goto cleanup;

                /* first byte of frame is the HDLC address field */
                if (framesize == 0 && info->params.addr_filter != 0xff)
                        addr_field = info->rbufs[end].buf[0];

                framesize += desc_count(info->rbufs[end]);

                if (desc_eof(info->rbufs[end]))
                        break;

                if (++end == info->rbuf_count)
                        end = 0;

                if (end == info->rbuf_current) {
                        /* scanned entire ring without finding EOF:
                         * restart receiver to recover */
                        if (info->rx_enabled){
                                spin_lock_irqsave(&info->lock,flags);
                                rx_start(info);
                                spin_unlock_irqrestore(&info->lock,flags);
                        }
                        goto cleanup;
                }
        }

        /* status
         *
         * 15      buffer complete
         * 14..06  reserved
         * 05..04  residue
         * 02      eof (end of frame)
         * 01      CRC error
         * 00      abort
         */
        status = desc_status(info->rbufs[end]);

        /* ignore CRC bit if not using CRC (bit is undefined) */
        if ((info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_NONE)
                status &= ~BIT1;

        /* drop empty frames and frames rejected by the address filter */
        if (framesize == 0 ||
                 (addr_field != 0xff && addr_field != info->params.addr_filter)) {
                free_rbufs(info, start, end);
                goto check_again;
        }

        if (framesize < (2 + crc_size) || status & BIT0) {
                info->icount.rxshort++;
                framesize = 0;
        } else if (status & BIT1) {
                info->icount.rxcrc++;
                /* with HDLC_CRC_RETURN_EX, CRC-error frames are still
                 * delivered with a status byte appended below */
                if (!(info->params.crc_type & HDLC_CRC_RETURN_EX))
                        framesize = 0;
        }

#if SYNCLINK_GENERIC_HDLC
        if (framesize == 0) {
                info->netdev->stats.rx_errors++;
                info->netdev->stats.rx_frame_errors++;
        }
#endif

        DBGBH(("%s rx frame status=%04X size=%d\n",
                info->device_name, status, framesize));
        DBGDATA(info, info->rbufs[start].buf, min_t(int, framesize, info->rbuf_fill_level), "rx");

        if (framesize) {
                if (!(info->params.crc_type & HDLC_CRC_RETURN_EX)) {
                        /* strip the CRC from the delivered frame */
                        framesize -= crc_size;
                        crc_size = 0;
                }

                if (framesize > info->max_frame_size + crc_size)
                        info->icount.rxlong++;
                else {
                        /* copy dma buffer(s) to contiguous temp buffer */
                        int copy_count = framesize;
                        int i = start;
                        unsigned char *p = info->tmp_rbuf;
                        info->tmp_rbuf_count = framesize;

                        info->icount.rxok++;

                        while(copy_count) {
                                int partial_count = min_t(int, copy_count, info->rbuf_fill_level);
                                memcpy(p, info->rbufs[i].buf, partial_count);
                                p += partial_count;
                                copy_count -= partial_count;
                                if (++i == info->rbuf_count)
                                        i = 0;
                        }

                        if (info->params.crc_type & HDLC_CRC_RETURN_EX) {
                                /* append CRC status byte to the frame */
                                *p = (status & BIT1) ? RX_CRC_ERROR : RX_OK;
                                framesize++;
                        }

#if SYNCLINK_GENERIC_HDLC
                        if (info->netcount)
                                hdlcdev_rx(info,info->tmp_rbuf, framesize);
                        else
#endif
                                ldisc_receive_buf(tty, info->tmp_rbuf, info->flag_buf, framesize);
                }
        }
        free_rbufs(info, start, end);
        return true;

cleanup:
        return false;
}
/*
 * pass receive buffer (RAW synchronous mode) to tty layer
 *
 * Delivers the contents of the current receive DMA buffer, if
 * complete, then returns the buffer to the free pool.
 *
 * return true if buffer available, otherwise false
 */
static bool rx_get_buf(struct slgt_info *info)
{
        unsigned int i = info->rbuf_current;
        unsigned int count;

        if (!desc_complete(info->rbufs[i]))
                return false;
        count = desc_count(info->rbufs[i]);
        switch(info->params.mode) {
        case MGSL_MODE_MONOSYNC:
        case MGSL_MODE_BISYNC:
        case MGSL_MODE_XSYNC:
                /* ignore residue in byte synchronous modes */
                if (desc_residue(info->rbufs[i]))
                        count--;
                break;
        }
        DBGDATA(info, info->rbufs[i].buf, count, "rx");
        DBGINFO(("rx_get_buf size=%d\n", count));
        if (count)
                ldisc_receive_buf(info->port.tty, info->rbufs[i].buf,
                                  info->flag_buf, count);
        /* recycle just this one buffer */
        free_rbufs(info, i, i);
        return true;
}
/*
 * mark all transmit DMA buffers as free and rewind the ring position
 */
static void reset_tbufs(struct slgt_info *info)
{
        unsigned int idx;

        info->tbuf_current = 0;
        for (idx = 0; idx < info->tbuf_count; idx++) {
                info->tbufs[idx].count = 0;
                info->tbufs[idx].status = 0;
        }
}
/*
 * return number of free transmit DMA buffers
 *
 * A buffer with a zero descriptor count is considered free, except
 * that the DMA controller clears the count of the buffer it is
 * currently reading, so one zero-count buffer may still be in use
 * while tx DMA is active.
 */
static unsigned int free_tbuf_count(struct slgt_info *info)
{
        unsigned int count = 0;
        unsigned int i = info->tbuf_current;

        do
        {
                if (desc_count(info->tbufs[i]))
                        break; /* buffer in use */
                ++count;
                if (++i == info->tbuf_count)
                        i=0;
        } while (i != info->tbuf_current);

        /* if tx DMA active, last zero count buffer is in use */
        if (count && (rd_reg32(info, TDCSR) & BIT0))
                --count;

        return count;
}
/*
 * return number of bytes in unsent transmit DMA buffers
 * and the serial controller tx FIFO
 */
static unsigned int tbuf_bytes(struct slgt_info *info)
{
        unsigned int total_count = 0;
        unsigned int i = info->tbuf_current;
        unsigned int reg_value;
        unsigned int count;
        unsigned int active_buf_count = 0;

        /*
         * Add descriptor counts for all tx DMA buffers.
         * If count is zero (cleared by DMA controller after read),
         * the buffer is complete or is actively being read from.
         *
         * Record buf_count of last buffer with zero count starting
         * from current ring position. buf_count is mirror
         * copy of count and is not cleared by serial controller.
         * If DMA controller is active, that buffer is actively
         * being read so add to total.
         */
        do {
                count = desc_count(info->tbufs[i]);
                if (count)
                        total_count += count;
                else if (!total_count)
                        /* only candidates before any nonzero-count
                         * buffer can be the one DMA is reading */
                        active_buf_count = info->tbufs[i].buf_count;
                if (++i == info->tbuf_count)
                        i = 0;
        } while (i != info->tbuf_current);

        /* read tx DMA status register */
        reg_value = rd_reg32(info, TDCSR);

        /* if tx DMA active, last zero count buffer is in use */
        if (reg_value & BIT0)
                total_count += active_buf_count;

        /* add tx FIFO count = reg_value[15..8] */
        total_count += (reg_value >> 8) & 0xff;

        /* if transmitter active add one byte for shift register */
        if (info->tx_active)
                total_count++;

        return total_count;
}
/*
 * load data into transmit DMA buffer ring and start transmitter if needed
 * return true if data accepted, otherwise false (buffers full)
 *
 * The statement order here is deliberate: all buffers are filled and
 * their counts set BEFORE the first buffer's count, so the DMA
 * controller cannot start consuming a partially loaded frame.
 */
static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size)
{
        unsigned short count;
        unsigned int i;
        struct slgt_desc *d;

        /* check required buffer space */
        if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info))
                return false;

        DBGDATA(info, buf, size, "tx");

        /*
         * copy data to one or more DMA buffers in circular ring
         * tbuf_start   = first buffer for this data
         * tbuf_current = next free buffer
         *
         * Copy all data before making data visible to DMA controller by
         * setting descriptor count of the first buffer.
         * This prevents an active DMA controller from reading the first DMA
         * buffers of a frame and stopping before the final buffers are filled.
         */
        info->tbuf_start = i = info->tbuf_current;

        while (size) {
                d = &info->tbufs[i];

                count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
                memcpy(d->buf, buf, count);

                size -= count;
                buf  += count;

                /*
                 * set EOF bit for last buffer of HDLC frame or
                 * for every buffer in raw mode
                 */
                if ((!size && info->params.mode == MGSL_MODE_HDLC) ||
                    info->params.mode == MGSL_MODE_RAW)
                        set_desc_eof(*d, 1);
                else
                        set_desc_eof(*d, 0);

                /* set descriptor count for all but first buffer */
                if (i != info->tbuf_start)
                        set_desc_count(*d, count);
                /* buf_count mirrors count; DMA clears count but not buf_count */
                d->buf_count = count;

                if (++i == info->tbuf_count)
                        i = 0;
        }

        info->tbuf_current = i;

        /* set first buffer count to make new data visible to DMA controller */
        d = &info->tbufs[info->tbuf_start];
        set_desc_count(*d, d->buf_count);

        /* start transmitter if needed and update transmit timeout */
        if (!info->tx_active)
                tx_start(info);
        update_tx_timer(info);

        return true;
}
/*
 * verify register read/write access by writing test patterns to two
 * scratch-usable registers (TIR and BDR) and reading them back
 *
 * Also latches gpio_present from JCR[5] while the device is mapped.
 * Returns 0 on success, -ENODEV on readback mismatch, and records the
 * result in info->init_error.
 */
static int register_test(struct slgt_info *info)
{
        static unsigned short patterns[] =
                {0x0000, 0xffff, 0xaaaa, 0x5555, 0x6969, 0x9696};
        static unsigned int count = ARRAY_SIZE(patterns);
        unsigned int i;
        int rc = 0;

        for (i=0 ; i < count ; i++) {
                wr_reg16(info, TIR, patterns[i]);
                wr_reg16(info, BDR, patterns[(i+1)%count]);
                if ((rd_reg16(info, TIR) != patterns[i]) ||
                    (rd_reg16(info, BDR) != patterns[(i+1)%count])) {
                        rc = -ENODEV;
                        break;
                }
        }

        info->gpio_present = (rd_reg32(info, JCR) & BIT5) ? 1 : 0;
        /* BUG FIX: ternary was inverted ("rc ? 0 : DiagStatus_AddressFailure"),
         * recording an address failure on SUCCESS and success on failure */
        info->init_error = rc ? DiagStatus_AddressFailure : 0;
        return rc;
}
/*
 * verify that the adapter can generate an interrupt
 *
 * Programs async mode at a high rate, enables the tx-idle interrupt,
 * transmits a single byte, and polls up to ~1 second for the ISR to
 * set info->irq_occurred. Original data rate and tty pointer are
 * restored before returning. Returns 0 on success, -ENODEV otherwise.
 */
static int irq_test(struct slgt_info *info)
{
        unsigned long timeout;
        unsigned long flags;
        struct tty_struct *oldtty = info->port.tty;
        u32 speed = info->params.data_rate;

        info->params.data_rate = 921600;
        info->port.tty = NULL;

        spin_lock_irqsave(&info->lock, flags);
        async_mode(info);
        slgt_irq_on(info, IRQ_TXIDLE);

        /* enable transmitter */
        wr_reg16(info, TCR,
                (unsigned short)(rd_reg16(info, TCR) | BIT1));

        /* write one byte and wait for tx idle */
        wr_reg16(info, TDR, 0);

        /* assume failure */
        info->init_error = DiagStatus_IrqFailure;
        info->irq_occurred = false;

        spin_unlock_irqrestore(&info->lock, flags);

        /* poll for the interrupt: 100 x 10ms */
        timeout=100;
        while(timeout-- && !info->irq_occurred)
                msleep_interruptible(10);

        spin_lock_irqsave(&info->lock,flags);
        reset_port(info);
        spin_unlock_irqrestore(&info->lock,flags);

        info->params.data_rate = speed;
        info->port.tty = oldtty;

        info->init_error = info->irq_occurred ? 0 : DiagStatus_IrqFailure;
        return info->irq_occurred ? 0 : -ENODEV;
}
/*
 * collect loopback test receive data from the first receive DMA buffer
 * into tmp_rbuf, skipping interleaved status bytes
 *
 * returns 1 if a completed receive buffer was processed, 0 otherwise
 */
static int loopback_test_rx(struct slgt_info *info)
{
        unsigned char *src, *dest;
        int count;

        if (desc_complete(info->rbufs[0])) {
                count = desc_count(info->rbufs[0]);
                src   = info->rbufs[0].buf;
                dest  = info->tmp_rbuf;

                for( ; count ; count-=2, src+=2) {
                        /* src=data byte (src+1)=status byte */
                        /* NOTE(review): BIT9 + BIT8 (0x300) can never be set
                         * in an 8-bit status byte, so this filter accepts
                         * every byte; looks like low-order error bits were
                         * intended — verify against the async rx status
                         * layout before changing */
                        if (!(*(src+1) & (BIT9 + BIT8))) {
                                *dest = *src;
                                dest++;
                                info->tmp_rbuf_count++;
                        }
                }
                DBGDATA(info, info->tmp_rbuf, info->tmp_rbuf_count, "rx");
                return 1;
        }
        return 0;
}
/*
 * perform an internal loopback test of the data path
 *
 * Saves the current parameters, switches to async mode with internal
 * loopback, transmits a known frame, and polls for it to arrive back.
 * Parameters and tty pointer are restored before returning.
 *
 * returns 0 on success, -ENODEV on failure; the result is also
 * recorded in info->init_error (DiagStatus_DmaFailure on failure)
 */
static int loopback_test(struct slgt_info *info)
{
#define TESTFRAMESIZE 20

        unsigned long timeout;
        u16 count = TESTFRAMESIZE;
        unsigned char buf[TESTFRAMESIZE];
        int rc = -ENODEV;
        unsigned long flags;
        struct tty_struct *oldtty = info->port.tty;
        MGSL_PARAMS params;

        /* save current parameters for restoration
         * (BUG FIX: "&params" had been corrupted into "¶ms" at both
         * memcpy call sites, which does not compile) */
        memcpy(&params, &info->params, sizeof(params));

        info->params.mode = MGSL_MODE_ASYNC;
        info->params.data_rate = 921600;
        info->params.loopback = 1;
        info->port.tty = NULL;

        /* build and send transmit frame */
        for (count = 0; count < TESTFRAMESIZE; ++count)
                buf[count] = (unsigned char)count;

        info->tmp_rbuf_count = 0;
        memset(info->tmp_rbuf, 0, TESTFRAMESIZE);

        /* program hardware for loopback and enable receiver */
        spin_lock_irqsave(&info->lock,flags);
        async_mode(info);
        rx_start(info);
        tx_load(info, buf, count);
        spin_unlock_irqrestore(&info->lock, flags);

        /* wait for receive complete: 100 x 10ms */
        for (timeout = 100; timeout; --timeout) {
                msleep_interruptible(10);
                if (loopback_test_rx(info)) {
                        rc = 0;
                        break;
                }
        }

        /* verify received frame length and contents */
        if (!rc && (info->tmp_rbuf_count != count ||
                    memcmp(buf, info->tmp_rbuf, count))) {
                rc = -ENODEV;
        }

        spin_lock_irqsave(&info->lock,flags);
        reset_adapter(info);
        spin_unlock_irqrestore(&info->lock,flags);

        /* restore saved parameters */
        memcpy(&info->params, &params, sizeof(info->params));
        info->port.tty = oldtty;

        info->init_error = rc ? DiagStatus_DmaFailure : 0;
        return rc;
}
/*
 * run the adapter diagnostics in order: register access, interrupt
 * generation, internal loopback; each test records its result in
 * info->init_error, which is returned (0 = all tests passed)
 */
static int adapter_test(struct slgt_info *info)
{
        DBGINFO(("testing %s\n", info->device_name));
        if (register_test(info) < 0) {
                printk("register test failure %s addr=%08X\n",
                        info->device_name, info->phys_reg_addr);
        } else if (irq_test(info) < 0) {
                printk("IRQ test failure %s IRQ=%d\n",
                        info->device_name, info->irq_level);
        } else if (loopback_test(info) < 0) {
                printk("loopback test failure %s\n", info->device_name);
        }
        return info->init_error;
}
/*
 * transmit timeout handler
 *
 * Timer callback fired when a transmit does not complete in time;
 * stops the transmitter and notifies the generic HDLC layer or the
 * bottom half so blocked writers can make progress.
 */
static void tx_timeout(unsigned long context)
{
        struct slgt_info *info = (struct slgt_info*)context;
        unsigned long flags;

        DBGINFO(("%s tx_timeout\n", info->device_name));
        /* only count as an error for framed (HDLC) transmits */
        if(info->tx_active && info->params.mode == MGSL_MODE_HDLC) {
                info->icount.txtimeout++;
        }
        spin_lock_irqsave(&info->lock,flags);
        tx_stop(info);
        spin_unlock_irqrestore(&info->lock,flags);

#if SYNCLINK_GENERIC_HDLC
        if (info->netcount)
                hdlcdev_tx_done(info);
        else
#endif
                bh_transmit(info);
}
/*
 * receive buffer polling timer
 *
 * Timer callback that schedules the receive bottom half so partially
 * filled receive buffers are delivered without waiting for more data.
 */
static void rx_timeout(unsigned long context)
{
        struct slgt_info *info = (struct slgt_info*)context;
        unsigned long flags;

        DBGINFO(("%s rx_timeout\n", info->device_name));
        spin_lock_irqsave(&info->lock, flags);
        info->pending_bh |= BH_RECEIVE;
        spin_unlock_irqrestore(&info->lock, flags);
        bh_handler(&info->task);
}
| gpl-2.0 |
AOKP/kernel_sony_common | net/dcb/dcbevent.c | 5441 | 1386 | /*
* Copyright (c) 2010, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Author: John Fastabend <john.r.fastabend@intel.com>
*/
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <linux/export.h>
static ATOMIC_NOTIFIER_HEAD(dcbevent_notif_chain);
/**
 * register_dcbevent_notifier - subscribe to DCB event notifications
 * @nb: notifier block to add to the atomic dcbevent chain
 *
 * Returns the result of atomic_notifier_chain_register().
 */
int register_dcbevent_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&dcbevent_notif_chain, nb);
}
EXPORT_SYMBOL(register_dcbevent_notifier);
/**
 * unregister_dcbevent_notifier - unsubscribe from DCB event notifications
 * @nb: notifier block to remove from the atomic dcbevent chain
 *
 * Returns the result of atomic_notifier_chain_unregister().
 */
int unregister_dcbevent_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&dcbevent_notif_chain, nb);
}
EXPORT_SYMBOL(unregister_dcbevent_notifier);
/**
 * call_dcbevent_notifiers - deliver an event to all registered notifiers
 * @val: event value passed to each notifier callback
 * @v: opaque payload pointer passed to each notifier callback
 *
 * Returns the result of atomic_notifier_call_chain().
 */
int call_dcbevent_notifiers(unsigned long val, void *v)
{
        return atomic_notifier_call_chain(&dcbevent_notif_chain, val, v);
}
| gpl-2.0 |
fire855/android_kernel_wiko_l5510 | drivers/watchdog/wdrtas.c | 7233 | 16518 | /*
* FIXME: add wdrtas_get_status and wdrtas_get_boot_status as soon as
* RTAS calls are available
*/
/*
* RTAS watchdog driver
*
* (C) Copyright IBM Corp. 2005
* device driver to exploit watchdog RTAS functions
*
* Authors : Utz Bacher <utz.bacher@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
#define WDRTAS_MAGIC_CHAR 42
#define WDRTAS_SUPPORTED_MASK (WDIOF_SETTIMEOUT | \
WDIOF_MAGICCLOSE)
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
MODULE_DESCRIPTION("RTAS watchdog driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS_MISCDEV(TEMP_MINOR);
static bool wdrtas_nowayout = WATCHDOG_NOWAYOUT;
static atomic_t wdrtas_miscdev_open = ATOMIC_INIT(0);
static char wdrtas_expect_close;
static int wdrtas_interval;
#define WDRTAS_THERMAL_SENSOR 3
static int wdrtas_token_get_sensor_state;
#define WDRTAS_SURVEILLANCE_IND 9000
static int wdrtas_token_set_indicator;
#define WDRTAS_SP_SPI 28
static int wdrtas_token_get_sp;
static int wdrtas_token_event_scan;
#define WDRTAS_DEFAULT_INTERVAL 300
#define WDRTAS_LOGBUFFER_LEN 128
static char wdrtas_logbuffer[WDRTAS_LOGBUFFER_LEN];
/*** watchdog access functions */
/**
 * wdrtas_set_interval - sets the watchdog interval
 * @interval: new interval in seconds (0 disables surveillance)
 *
 * returns 0 on success, <0 on failures
 *
 * wdrtas_set_interval sets the watchdog keepalive interval by calling the
 * RTAS function set-indicator (surveillance). The unit of interval is
 * seconds.
 */
static int wdrtas_set_interval(int interval)
{
        long result;
        /* rate-limit: log at most the first 10 failures */
        static int print_msg = 10;

        /* rtas uses minutes, round up */
        interval = (interval + 59) / 60;

        result = rtas_call(wdrtas_token_set_indicator, 3, 1, NULL,
                           WDRTAS_SURVEILLANCE_IND, 0, interval);
        if (result < 0 && print_msg) {
                pr_err("setting the watchdog to %i timeout failed: %li\n",
                       interval, result);
                print_msg--;
        }

        return result;
}
/* length of the sp_spi system-parameter payload in bytes */
#define WDRTAS_SP_SPI_LEN 4

/**
 * wdrtas_get_interval - returns the current watchdog interval
 * @fallback_value: value (in seconds) to use, if the RTAS call fails
 *
 * returns the interval
 *
 * wdrtas_get_interval returns the current watchdog keepalive interval
 * as reported by the RTAS function ibm,get-system-parameter. The unit
 * of the return value is seconds.
 */
static int wdrtas_get_interval(int fallback_value)
{
        long result;
        char value[WDRTAS_SP_SPI_LEN];

        /* rtas_data_buf is a shared RTAS-addressable buffer; serialize use */
        spin_lock(&rtas_data_buf_lock);
        memset(rtas_data_buf, 0, WDRTAS_SP_SPI_LEN);
        result = rtas_call(wdrtas_token_get_sp, 3, 1, NULL,
                           WDRTAS_SP_SPI, __pa(rtas_data_buf),
                           WDRTAS_SP_SPI_LEN);
        memcpy(value, rtas_data_buf, WDRTAS_SP_SPI_LEN);
        spin_unlock(&rtas_data_buf_lock);

        /* sanity-check the returned parameter header before trusting it */
        if (value[0] != 0 || value[1] != 2 || value[3] != 0 || result < 0) {
                pr_warn("could not get sp_spi watchdog timeout (%li). Continuing\n",
                        result);
                return fallback_value;
        }

        /* rtas uses minutes */
        return ((int)value[2]) * 60;
}
/**
 * wdrtas_timer_start - starts watchdog
 *
 * wdrtas_timer_start starts the watchdog by calling the RTAS function
 * set-indicator (surveillance) with the configured interval
 */
static void wdrtas_timer_start(void)
{
        wdrtas_set_interval(wdrtas_interval);
}
/**
 * wdrtas_timer_stop - stops watchdog
 *
 * wdrtas_timer_stop stops the watchdog timer by calling the RTAS function
 * set-indicator (surveillance) with an interval of 0
 */
static void wdrtas_timer_stop(void)
{
        wdrtas_set_interval(0);
}
/**
 * wdrtas_log_scanned_event - logs an event we received during keepalive
 *
 * wdrtas_log_scanned_event prints a message to the log buffer dumping
 * the results of the last event-scan call, 16 bytes per line
 */
static void wdrtas_log_scanned_event(void)
{
        int i;

        for (i = 0; i < WDRTAS_LOGBUFFER_LEN; i += 16)
                pr_info("dumping event (line %i/%i), data = "
                        "%02x %02x %02x %02x %02x %02x %02x %02x "
                        "%02x %02x %02x %02x %02x %02x %02x %02x\n",
                        (i / 16) + 1, (WDRTAS_LOGBUFFER_LEN / 16),
                        wdrtas_logbuffer[i + 0], wdrtas_logbuffer[i + 1],
                        wdrtas_logbuffer[i + 2], wdrtas_logbuffer[i + 3],
                        wdrtas_logbuffer[i + 4], wdrtas_logbuffer[i + 5],
                        wdrtas_logbuffer[i + 6], wdrtas_logbuffer[i + 7],
                        wdrtas_logbuffer[i + 8], wdrtas_logbuffer[i + 9],
                        wdrtas_logbuffer[i + 10], wdrtas_logbuffer[i + 11],
                        wdrtas_logbuffer[i + 12], wdrtas_logbuffer[i + 13],
                        wdrtas_logbuffer[i + 14], wdrtas_logbuffer[i + 15]);
}
/**
 * wdrtas_timer_keepalive - resets watchdog timer to keep system alive
 *
 * wdrtas_timer_keepalive restarts the watchdog timer by calling the
 * RTAS function event-scan and repeats these calls as long as there are
 * events available. All events will be dumped.
 */
static void wdrtas_timer_keepalive(void)
{
        long result;

        /* event-scan returns 0 while more events are pending; drain them */
        do {
                result = rtas_call(wdrtas_token_event_scan, 4, 1, NULL,
                                   RTAS_EVENT_SCAN_ALL_EVENTS, 0,
                                   (void *)__pa(wdrtas_logbuffer),
                                   WDRTAS_LOGBUFFER_LEN);
                if (result < 0)
                        pr_err("event-scan failed: %li\n", result);
                if (result == 0)
                        wdrtas_log_scanned_event();
        } while (result == 0);
}
/**
 * wdrtas_get_temperature - returns current temperature
 *
 * returns temperature or <0 on failures
 *
 * Reads the thermal sensor via the RTAS get-sensor-state call
 * (sensor token 3) and converts the Celsius reading to Fahrenheit.
 */
static int wdrtas_get_temperature(void)
{
        int rc;
        int temperature = 0;

        rc = rtas_get_sensor(WDRTAS_THERMAL_SENSOR, 0, &temperature);
        if (rc < 0) {
                pr_warn("reading the thermal sensor failed: %i\n", rc);
                return temperature;
        }

        /* celsius -> fahrenheit */
        return ((temperature * 9) / 5) + 32;
}
/**
 * wdrtas_get_status - returns the status of the watchdog
 *
 * returns a bitmask of defines WDIOF_... as defined in
 * include/linux/watchdog.h
 *
 * Stub: always 0 until the required RTAS calls are available
 * (see the FIXME at the top of this file).
 */
static int wdrtas_get_status(void)
{
        return 0; /* TODO */
}
/**
 * wdrtas_get_boot_status - returns the reason for the last boot
 *
 * returns a bitmask of defines WDIOF_... as defined in
 * include/linux/watchdog.h, indicating why the watchdog rebooted the system
 *
 * Stub: always 0 until the required RTAS calls are available
 * (see the FIXME at the top of this file).
 */
static int wdrtas_get_boot_status(void)
{
        return 0; /* TODO */
}
/*** watchdog API and operations stuff */

/**
 * wdrtas_write - called when watchdog device is written to
 * @file: file structure
 * @buf: user buffer with data
 * @len: amount to data written
 * @ppos: position in file
 *
 * returns the number of successfully processed characters, which is always
 * the number of bytes passed to this function
 *
 * wdrtas_write processes all the data given to it and looks for the magic
 * character 'V'. This character allows the watchdog device to be closed
 * properly. Every write also pings the watchdog (keepalive).
 */
static ssize_t wdrtas_write(struct file *file, const char __user *buf,
                            size_t len, loff_t *ppos)
{
        int i;
        char c;

        if (!len)
                goto out;

        if (!wdrtas_nowayout) {
                /* any write invalidates a previously seen 'V' ... */
                wdrtas_expect_close = 0;
                /* look for 'V' */
                for (i = 0; i < len; i++) {
                        if (get_user(c, buf + i))
                                return -EFAULT;

                        /* allow to close device */
                        if (c == 'V')
                                wdrtas_expect_close = WDRTAS_MAGIC_CHAR;
                }
        }

        wdrtas_timer_keepalive();

out:
        return len;
}
/**
 * wdrtas_ioctl - ioctl function for the watchdog device
 * @file: file structure
 * @cmd: command for ioctl
 * @arg: argument pointer
 *
 * returns 0 on success, <0 on failure
 *
 * wdrtas_ioctl implements the watchdog API ioctls
 */
static long wdrtas_ioctl(struct file *file, unsigned int cmd,
                         unsigned long arg)
{
        int __user *argp = (void __user *)arg;
        int i;
        static const struct watchdog_info wdinfo = {
                .options = WDRTAS_SUPPORTED_MASK,
                .firmware_version = 0,
                .identity = "wdrtas",
        };

        switch (cmd) {
        case WDIOC_GETSUPPORT:
                if (copy_to_user(argp, &wdinfo, sizeof(wdinfo)))
                        return -EFAULT;
                return 0;

        case WDIOC_GETSTATUS:
                i = wdrtas_get_status();
                return put_user(i, argp);

        case WDIOC_GETBOOTSTATUS:
                i = wdrtas_get_boot_status();
                return put_user(i, argp);

        case WDIOC_GETTEMP:
                /* temperature requires the get-sensor-state RTAS token */
                if (wdrtas_token_get_sensor_state == RTAS_UNKNOWN_SERVICE)
                        return -EOPNOTSUPP;

                i = wdrtas_get_temperature();
                return put_user(i, argp);

        case WDIOC_SETOPTIONS:
                if (get_user(i, argp))
                        return -EFAULT;
                if (i & WDIOS_DISABLECARD)
                        wdrtas_timer_stop();
                if (i & WDIOS_ENABLECARD) {
                        wdrtas_timer_keepalive();
                        wdrtas_timer_start();
                }
                /* not implemented. Done by H8
                if (i & WDIOS_TEMPPANIC) {
                } */
                return 0;

        case WDIOC_KEEPALIVE:
                wdrtas_timer_keepalive();
                return 0;

        case WDIOC_SETTIMEOUT:
                if (get_user(i, argp))
                        return -EFAULT;

                if (wdrtas_set_interval(i))
                        return -EINVAL;

                wdrtas_timer_keepalive();

                /* re-read the effective interval from firmware when
                 * possible — RTAS rounds the value up to whole minutes */
                if (wdrtas_token_get_sp == RTAS_UNKNOWN_SERVICE)
                        wdrtas_interval = i;
                else
                        wdrtas_interval = wdrtas_get_interval(i);
                /* fallthrough */

        case WDIOC_GETTIMEOUT:
                return put_user(wdrtas_interval, argp);

        default:
                return -ENOTTY;
        }
}
/**
 * wdrtas_open - open function of watchdog device
 * @inode: inode structure
 * @file: file structure
 *
 * returns 0 on success, -EBUSY if the file has been opened already, <0 on
 * other failures
 *
 * function called when watchdog device is opened; starts the watchdog
 * on first open
 */
static int wdrtas_open(struct inode *inode, struct file *file)
{
        /* only open once — the atomic counter enforces exclusivity */
        if (atomic_inc_return(&wdrtas_miscdev_open) > 1) {
                atomic_dec(&wdrtas_miscdev_open);
                return -EBUSY;
        }

        wdrtas_timer_start();
        wdrtas_timer_keepalive();

        return nonseekable_open(inode, file);
}
/**
 * wdrtas_close - close function of watchdog device
 * @inode: inode structure
 * @file: file structure
 *
 * returns 0 on success
 *
 * close function. Always succeeds. The watchdog keeps running unless
 * the magic character 'V' was written before closing.
 */
static int wdrtas_close(struct inode *inode, struct file *file)
{
        /* only stop watchdog, if this was announced using 'V' before */
        if (wdrtas_expect_close == WDRTAS_MAGIC_CHAR)
                wdrtas_timer_stop();
        else {
                pr_warn("got unexpected close. Watchdog not stopped.\n");
                wdrtas_timer_keepalive();
        }

        wdrtas_expect_close = 0;
        atomic_dec(&wdrtas_miscdev_open);
        return 0;
}
/**
 * wdrtas_temp_read - gives back the temperature in fahrenheit
 * @file: file structure
 * @buf: user buffer
 * @count: number of bytes to be read
 * @ppos: position in file
 *
 * returns always 1 or -EFAULT in case of user space copy failures, <0 on
 * other failures
 *
 * wdrtas_temp_read gives the temperature to the users by copying this
 * value as one byte into the user space buffer. The unit is Fahrenheit...
 */
static ssize_t wdrtas_temp_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	int temperature;
	unsigned char temp_byte;

	temperature = wdrtas_get_temperature();
	if (temperature < 0)
		return temperature;

	/*
	 * Copy the low-order byte explicitly.  Copying the first byte of
	 * the int (as the previous code did) is endian-dependent and
	 * handed the most significant byte - i.e. zero for any sane
	 * temperature - to userspace on big-endian machines such as the
	 * pSeries boxes this RTAS driver runs on.
	 */
	temp_byte = (unsigned char)temperature;
	if (copy_to_user(buf, &temp_byte, 1))
		return -EFAULT;

	return 1;
}
/**
 * wdrtas_temp_open - open function of temperature device
 * @inode: inode structure
 * @file: file structure
 *
 * returns 0 on success, <0 on failure
 *
 * function called when temperature device is opened
 */
static int wdrtas_temp_open(struct inode *inode, struct file *file)
{
	/* no per-open state needed; just mark the file non-seekable */
	return nonseekable_open(inode, file);
}
/**
 * wdrtas_temp_close - close function of temperature device
 * @inode: inode structure
 * @file: file structure
 *
 * returns 0 on success
 *
 * close function. Always succeeds
 */
static int wdrtas_temp_close(struct inode *inode, struct file *file)
{
	/* nothing to tear down */
	return 0;
}
/**
 * wdrtas_reboot - reboot notifier function
 * @this: notifier block structure
 * @code: reboot code
 * @ptr: unused
 *
 * returns NOTIFY_DONE
 *
 * wdrtas_reboot stops the watchdog in case of a reboot
 */
static int wdrtas_reboot(struct notifier_block *this,
		unsigned long code, void *ptr)
{
	/* only stop the timer on an orderly shutdown or halt */
	if (code == SYS_DOWN || code == SYS_HALT)
		wdrtas_timer_stop();
	return NOTIFY_DONE;
}
/*** initialization stuff */

/* file operations of the /dev/watchdog misc device */
static const struct file_operations wdrtas_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= wdrtas_write,
	.unlocked_ioctl	= wdrtas_ioctl,
	.open		= wdrtas_open,
	.release	= wdrtas_close,
};

static struct miscdevice wdrtas_miscdev = {
	.minor =	WATCHDOG_MINOR,
	.name =		"watchdog",
	.fops =		&wdrtas_fops,
};

/* file operations of the /dev/temperature misc device (read-only) */
static const struct file_operations wdrtas_temp_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= wdrtas_temp_read,
	.open		= wdrtas_temp_open,
	.release	= wdrtas_temp_close,
};

static struct miscdevice wdrtas_tempdev = {
	.minor =	TEMP_MINOR,
	.name =		"temperature",
	.fops =		&wdrtas_temp_fops,
};

/* stops the watchdog timer on system shutdown/halt */
static struct notifier_block wdrtas_notifier = {
	.notifier_call =	wdrtas_reboot,
};
/**
 * wdrtas_get_tokens - reads in RTAS tokens
 *
 * returns 0 on success, <0 on failure
 *
 * wdrtas_get_tokens reads in the tokens for the RTAS calls used in
 * this watchdog driver. It tolerates, if "get-sensor-state" and
 * "ibm,get-system-parameter" are not available.
 */
static int wdrtas_get_tokens(void)
{
	/* optional service: temperature readout */
	wdrtas_token_get_sensor_state = rtas_token("get-sensor-state");
	if (wdrtas_token_get_sensor_state == RTAS_UNKNOWN_SERVICE)
		pr_warn("couldn't get token for get-sensor-state. Trying to continue without temperature support.\n");

	/* optional service: firmware-reported surveillance interval */
	wdrtas_token_get_sp = rtas_token("ibm,get-system-parameter");
	if (wdrtas_token_get_sp == RTAS_UNKNOWN_SERVICE)
		pr_warn("couldn't get token for ibm,get-system-parameter. Trying to continue with a default timeout value of %i seconds.\n",
			WDRTAS_DEFAULT_INTERVAL);

	/* mandatory services: without these the watchdog cannot work */
	wdrtas_token_set_indicator = rtas_token("set-indicator");
	if (wdrtas_token_set_indicator == RTAS_UNKNOWN_SERVICE) {
		pr_err("couldn't get token for set-indicator. Terminating watchdog code.\n");
		return -EIO;
	}

	wdrtas_token_event_scan = rtas_token("event-scan");
	if (wdrtas_token_event_scan == RTAS_UNKNOWN_SERVICE) {
		pr_err("couldn't get token for event-scan. Terminating watchdog code.\n");
		return -EIO;
	}

	return 0;
}
/**
 * wdrtas_unregister_devs - unregisters the misc dev handlers
 *
 * wdrtas_unregister_devs unregisters the watchdog and temperature watchdog
 * misc devs
 */
static void wdrtas_unregister_devs(void)
{
	misc_deregister(&wdrtas_miscdev);
	/* the temperature device only exists if the sensor service was
	 * available when wdrtas_register_devs() ran */
	if (wdrtas_token_get_sensor_state != RTAS_UNKNOWN_SERVICE)
		misc_deregister(&wdrtas_tempdev);
}
/**
 * wdrtas_register_devs - registers the misc dev handlers
 *
 * returns 0 on success, <0 on failure
 *
 * wdrtas_register_devs registers the watchdog and temperature watchdog
 * misc devs
 */
static int wdrtas_register_devs(void)
{
	int rc;

	rc = misc_register(&wdrtas_miscdev);
	if (rc) {
		pr_err("couldn't register watchdog misc device. Terminating watchdog code.\n");
		return rc;
	}

	/* no sensor service -> no temperature device to register */
	if (wdrtas_token_get_sensor_state == RTAS_UNKNOWN_SERVICE)
		return 0;

	/* temperature support is best-effort: disable it on failure */
	if (misc_register(&wdrtas_tempdev)) {
		pr_warn("couldn't register watchdog temperature misc device. Continuing without temperature support.\n");
		wdrtas_token_get_sensor_state = RTAS_UNKNOWN_SERVICE;
	}

	return 0;
}
/**
 * wdrtas_init - init function of the watchdog driver
 *
 * returns 0 on success, <0 on failure
 *
 * registers the file handlers and the reboot notifier
 */
static int __init wdrtas_init(void)
{
	int result;

	/* propagate the callees' error codes instead of flattening
	 * everything to -ENODEV, so failures are distinguishable */
	result = wdrtas_get_tokens();
	if (result)
		return result;

	result = wdrtas_register_devs();
	if (result)
		return result;

	if (register_reboot_notifier(&wdrtas_notifier)) {
		pr_err("could not register reboot notifier. Terminating watchdog code.\n");
		wdrtas_unregister_devs();
		return -ENODEV;
	}

	/* seed the interval; ask the firmware if the service exists */
	if (wdrtas_token_get_sp == RTAS_UNKNOWN_SERVICE)
		wdrtas_interval = WDRTAS_DEFAULT_INTERVAL;
	else
		wdrtas_interval = wdrtas_get_interval(WDRTAS_DEFAULT_INTERVAL);

	return 0;
}
/**
 * wdrtas_exit - exit function of the watchdog driver
 *
 * unregisters the file handlers and the reboot notifier
 */
static void __exit wdrtas_exit(void)
{
	/* honour the nowayout policy: keep the timer armed if set */
	if (!wdrtas_nowayout)
		wdrtas_timer_stop();
	wdrtas_unregister_devs();
	unregister_reboot_notifier(&wdrtas_notifier);
}

module_init(wdrtas_init);
module_exit(wdrtas_exit);
| gpl-2.0 |
sztupy/samsung-kernel-herring | drivers/gpu/drm/radeon/radeon_ioc32.c | 9281 | 13452 | /**
* \file radeon_ioc32.c
*
* 32-bit ioctl compatibility routines for the Radeon DRM.
*
* \author Paul Mackerras <paulus@samba.org>
*
* Copyright (C) Paul Mackerras 2005
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
/* 32-bit layout of drm_radeon_init_t: userland pointers shrink to u32 */
typedef struct drm_radeon_init32 {
	int func;
	u32 sarea_priv_offset;
	int is_pci;
	int cp_mode;
	int gart_size;
	int ring_size;
	int usec_timeout;

	unsigned int fb_bpp;
	unsigned int front_offset, front_pitch;
	unsigned int back_offset, back_pitch;
	unsigned int depth_bpp;
	unsigned int depth_offset, depth_pitch;

	u32 fb_offset;
	u32 mmio_offset;
	u32 ring_offset;
	u32 ring_rptr_offset;
	u32 buffers_offset;
	u32 gart_textures_offset;
} drm_radeon_init32_t;

/*
 * Translate a 32-bit DRM_IOCTL_RADEON_CP_INIT: copy the 32-bit struct
 * in, rebuild the native struct on the compat user stack, and forward.
 */
static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	drm_radeon_init32_t init32;
	drm_radeon_init_t __user *init;

	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
		return -EFAULT;

	init = compat_alloc_user_space(sizeof(*init));
	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
	    || __put_user(init32.func, &init->func)
	    || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
	    || __put_user(init32.is_pci, &init->is_pci)
	    || __put_user(init32.cp_mode, &init->cp_mode)
	    || __put_user(init32.gart_size, &init->gart_size)
	    || __put_user(init32.ring_size, &init->ring_size)
	    || __put_user(init32.usec_timeout, &init->usec_timeout)
	    || __put_user(init32.fb_bpp, &init->fb_bpp)
	    || __put_user(init32.front_offset, &init->front_offset)
	    || __put_user(init32.front_pitch, &init->front_pitch)
	    || __put_user(init32.back_offset, &init->back_offset)
	    || __put_user(init32.back_pitch, &init->back_pitch)
	    || __put_user(init32.depth_bpp, &init->depth_bpp)
	    || __put_user(init32.depth_offset, &init->depth_offset)
	    || __put_user(init32.depth_pitch, &init->depth_pitch)
	    || __put_user(init32.fb_offset, &init->fb_offset)
	    || __put_user(init32.mmio_offset, &init->mmio_offset)
	    || __put_user(init32.ring_offset, &init->ring_offset)
	    || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
	    || __put_user(init32.buffers_offset, &init->buffers_offset)
	    || __put_user(init32.gart_textures_offset,
			  &init->gart_textures_offset))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
}
/* 32-bit layout of drm_radeon_clear_t */
typedef struct drm_radeon_clear32 {
	unsigned int flags;
	unsigned int clear_color;
	unsigned int clear_depth;
	unsigned int color_mask;
	unsigned int depth_mask;	/* misnamed field:  should be stencil */
	u32 depth_boxes;		/* 32-bit user pointer */
} drm_radeon_clear32_t;

/*
 * Translate a 32-bit DRM_IOCTL_RADEON_CLEAR, widening the depth_boxes
 * pointer to its native size before forwarding.
 */
static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	drm_radeon_clear32_t clr32;
	drm_radeon_clear_t __user *clr;

	if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32)))
		return -EFAULT;

	clr = compat_alloc_user_space(sizeof(*clr));
	if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr))
	    || __put_user(clr32.flags, &clr->flags)
	    || __put_user(clr32.clear_color, &clr->clear_color)
	    || __put_user(clr32.clear_depth, &clr->clear_depth)
	    || __put_user(clr32.color_mask, &clr->color_mask)
	    || __put_user(clr32.depth_mask, &clr->depth_mask)
	    || __put_user((void __user *)(unsigned long)clr32.depth_boxes,
			  &clr->depth_boxes))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
}
/* 32-bit layout of drm_radeon_stipple_t: mask is a 32-bit user pointer */
typedef struct drm_radeon_stipple32 {
	u32 mask;
} drm_radeon_stipple32_t;

/*
 * Translate a 32-bit DRM_IOCTL_RADEON_STIPPLE: widen the mask pointer
 * and forward the rebuilt request.
 */
static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	drm_radeon_stipple32_t __user *argp = (void __user *)arg;
	drm_radeon_stipple_t __user *request;
	u32 mask;

	if (get_user(mask, &argp->mask))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user((unsigned int __user *)(unsigned long)mask,
			  &request->mask))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
}
/* 32-bit layout of drm_radeon_tex_image_t */
typedef struct drm_radeon_tex_image32 {
	unsigned int x, y;		/* Blit coordinates */
	unsigned int width, height;
	u32 data;			/* 32-bit user pointer to texel data */
} drm_radeon_tex_image32_t;

/* 32-bit layout of drm_radeon_texture_t */
typedef struct drm_radeon_texture32 {
	unsigned int offset;
	int pitch;
	int format;
	int width;			/* Texture image coordinates */
	int height;
	u32 image;			/* 32-bit user pointer to tex_image */
} drm_radeon_texture32_t;

/*
 * Translate a 32-bit DRM_IOCTL_RADEON_TEXTURE.  Both the texture
 * descriptor and the image struct it points to must be rebuilt; the
 * native image struct is placed directly after the request in the
 * compat user-space area.
 */
static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	drm_radeon_texture32_t req32;
	drm_radeon_texture_t __user *request;
	drm_radeon_tex_image32_t img32;
	drm_radeon_tex_image_t __user *image;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;
	/* a NULL image pointer is invalid for this ioctl */
	if (req32.image == 0)
		return -EINVAL;
	if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image,
			   sizeof(img32)))
		return -EFAULT;

	/* one allocation holds the request followed by the image struct */
	request = compat_alloc_user_space(sizeof(*request) + sizeof(*image));
	if (!access_ok(VERIFY_WRITE, request,
		       sizeof(*request) + sizeof(*image)))
		return -EFAULT;
	image = (drm_radeon_tex_image_t __user *) (request + 1);

	if (__put_user(req32.offset, &request->offset)
	    || __put_user(req32.pitch, &request->pitch)
	    || __put_user(req32.format, &request->format)
	    || __put_user(req32.width, &request->width)
	    || __put_user(req32.height, &request->height)
	    || __put_user(image, &request->image)
	    || __put_user(img32.x, &image->x)
	    || __put_user(img32.y, &image->y)
	    || __put_user(img32.width, &image->width)
	    || __put_user(img32.height, &image->height)
	    || __put_user((const void __user *)(unsigned long)img32.data,
			  &image->data))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
}
/* 32-bit layout of drm_radeon_vertex2_t */
typedef struct drm_radeon_vertex2_32 {
	int idx;			/* Index of vertex buffer */
	int discard;			/* Client finished with buffer? */
	int nr_states;
	u32 state;			/* 32-bit user pointer */
	int nr_prims;
	u32 prim;			/* 32-bit user pointer */
} drm_radeon_vertex2_32_t;

/*
 * Translate a 32-bit DRM_IOCTL_RADEON_VERTEX2, widening the state and
 * prim array pointers.
 */
static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	drm_radeon_vertex2_32_t req32;
	drm_radeon_vertex2_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.idx, &request->idx)
	    || __put_user(req32.discard, &request->discard)
	    || __put_user(req32.nr_states, &request->nr_states)
	    || __put_user((void __user *)(unsigned long)req32.state,
			  &request->state)
	    || __put_user(req32.nr_prims, &request->nr_prims)
	    || __put_user((void __user *)(unsigned long)req32.prim,
			  &request->prim))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
}
/* 32-bit layout of drm_radeon_cmd_buffer_t */
typedef struct drm_radeon_cmd_buffer32 {
	int bufsz;
	u32 buf;			/* 32-bit user pointer */
	int nbox;
	u32 boxes;			/* 32-bit user pointer */
} drm_radeon_cmd_buffer32_t;

/*
 * Translate a 32-bit DRM_IOCTL_RADEON_CMDBUF, widening the command
 * buffer and cliprect-box pointers.
 */
static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	drm_radeon_cmd_buffer32_t req32;
	drm_radeon_cmd_buffer_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.bufsz, &request->bufsz)
	    || __put_user((void __user *)(unsigned long)req32.buf,
			  &request->buf)
	    || __put_user(req32.nbox, &request->nbox)
	    || __put_user((void __user *)(unsigned long)req32.boxes,
			  &request->boxes))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
}
/* 32-bit layout of drm_radeon_getparam_t */
typedef struct drm_radeon_getparam32 {
	int param;
	u32 value;			/* 32-bit user pointer for the result */
} drm_radeon_getparam32_t;

/*
 * Translate a 32-bit DRM_IOCTL_RADEON_GETPARAM, widening the result
 * pointer.
 */
static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	drm_radeon_getparam32_t req32;
	drm_radeon_getparam_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.param, &request->param)
	    || __put_user((void __user *)(unsigned long)req32.value,
			  &request->value))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
}
/* 32-bit layout of drm_radeon_mem_alloc_t */
typedef struct drm_radeon_mem_alloc32 {
	int region;
	int alignment;
	int size;
	u32 region_offset;	/* offset from start of fb or GART */
} drm_radeon_mem_alloc32_t;

/*
 * Translate a 32-bit DRM_IOCTL_RADEON_ALLOC, widening the
 * region_offset result pointer.
 */
static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	drm_radeon_mem_alloc32_t req32;
	drm_radeon_mem_alloc_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.region, &request->region)
	    || __put_user(req32.alignment, &request->alignment)
	    || __put_user(req32.size, &request->size)
	    || __put_user((int __user *)(unsigned long)req32.region_offset,
			  &request->region_offset))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
}
/* 32-bit layout of drm_radeon_irq_emit_t */
typedef struct drm_radeon_irq_emit32 {
	u32 irq_seq;			/* 32-bit user pointer for the result */
} drm_radeon_irq_emit32_t;

/*
 * Translate a 32-bit DRM_IOCTL_RADEON_IRQ_EMIT, widening the irq_seq
 * result pointer.
 */
static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	drm_radeon_irq_emit32_t req32;
	drm_radeon_irq_emit_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user((int __user *)(unsigned long)req32.irq_seq,
			  &request->irq_seq))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
}
/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
#if defined (CONFIG_X86_64) || defined(CONFIG_IA64)
/* 32-bit layout of drm_radeon_setparam_t; packed because the u64 is
 * only 4-byte aligned in 32-bit x86/ia64 code */
typedef struct drm_radeon_setparam32 {
	int param;
	u64 value;
} __attribute__((packed)) drm_radeon_setparam32_t;

/*
 * Translate a 32-bit DRM_IOCTL_RADEON_SETPARAM, re-packing the
 * misaligned 64-bit value.
 */
static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	drm_radeon_setparam32_t req32;
	drm_radeon_setparam_t __user *request;

	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.param, &request->param)
	    || __put_user((void __user *)(unsigned long)req32.value,
			  &request->value))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
}
#else
/* on other arches the layouts already match; no translation needed */
#define compat_radeon_cp_setparam NULL
#endif /* X86_64 || IA64 */
/* translation handlers indexed by driver ioctl number; NULL entries
 * (and ioctls past the end of the array) are passed straight through */
drm_ioctl_compat_t *radeon_compat_ioctls[] = {
	[DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
	[DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
	[DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
	[DRM_RADEON_TEXTURE] = compat_radeon_cp_texture,
	[DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
	[DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
	[DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
	[DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam,
	[DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
	[DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
};
/**
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/dri/card<n>.
 *
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 */
long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);
	drm_ioctl_compat_t *handler = NULL;

	/* core DRM ioctls are translated by the DRM layer itself */
	if (nr < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);

	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
		handler = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];

	/* driver ioctls without a compat wrapper pass straight through */
	return handler ? (*handler)(filp, cmd, arg)
		       : drm_ioctl(filp, cmd, arg);
}
/*
 * 32-bit compat entry point for the KMS driver: only the core DRM
 * ioctls need translation; driver-specific ones pass straight through.
 */
long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (DRM_IOCTL_NR(cmd) < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);

	return drm_ioctl(filp, cmd, arg);
}
| gpl-2.0 |
srfarias/srfarias-kernel | sound/pci/ice1712/quartet.c | 9281 | 30730 | /*
* ALSA driver for ICEnsemble VT1724 (Envy24HT)
*
* Lowlevel functions for Infrasonic Quartet
*
* Copyright (c) 2009 Pavel Hofman <pavel.hofman@ivitera.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/tlv.h>
#include <sound/info.h>
#include "ice1712.h"
#include "envy24ht.h"
#include <sound/ak4113.h>
#include "quartet.h"
/* per-card private data: shadow copies of the write-only CPLD registers */
struct qtet_spec {
	struct ak4113 *ak4113;		/* S/PDIF receiver chip */
	unsigned int scr;		/* system control register */
	unsigned int mcr;		/* monitoring control register */
	unsigned int cpld;		/* cpld register */
};

/* per-kcontrol binding of a register bit to its accessor functions */
struct qtet_kcontrol_private {
	unsigned int bit;
	void (*set_register)(struct snd_ice1712 *ice, unsigned int val);
	unsigned int (*get_register)(struct snd_ice1712 *ice);
	unsigned char *texts[2];	/* enum labels for bit==0 / bit==1 */
};

/* indices into the kcontrol private-value table */
enum {
	IN12_SEL = 0,
	IN34_SEL,
	AIN34_SEL,
	COAX_OUT,
	IN12_MON12,
	IN12_MON34,
	IN34_MON12,
	IN34_MON34,
	OUT12_MON34,
	OUT34_MON12,
};

static char *ext_clock_names[3] = {"IEC958 In", "Word Clock 1xFS",
	"Word Clock 256xFS"};
/* chip address on I2C bus */
#define AK4113_ADDR 0x26 /* S/PDIF receiver */
/* chip address on SPI bus */
#define AK4620_ADDR 0x02 /* ADC/DAC */
/*
* GPIO pins
*/
/* GPIO0 - O - DATA0, def. 0 */
#define GPIO_D0 (1<<0)
/* GPIO1 - I/O - DATA1, Jack Detect Input0 (0:present, 1:missing), def. 1 */
#define GPIO_D1_JACKDTC0 (1<<1)
/* GPIO2 - I/O - DATA2, Jack Detect Input1 (0:present, 1:missing), def. 1 */
#define GPIO_D2_JACKDTC1 (1<<2)
/* GPIO3 - I/O - DATA3, def. 1 */
#define GPIO_D3 (1<<3)
/* GPIO4 - I/O - DATA4, SPI CDTO, def. 1 */
#define GPIO_D4_SPI_CDTO (1<<4)
/* GPIO5 - I/O - DATA5, SPI CCLK, def. 1 */
#define GPIO_D5_SPI_CCLK (1<<5)
/* GPIO6 - I/O - DATA6, Cable Detect Input (0:detected, 1:not detected */
#define GPIO_D6_CD (1<<6)
/* GPIO7 - I/O - DATA7, Device Detect Input (0:detected, 1:not detected */
#define GPIO_D7_DD (1<<7)
/* GPIO8 - O - CPLD Chip Select, def. 1 */
#define GPIO_CPLD_CSN (1<<8)
/* GPIO9 - O - CPLD register read/write (0:write, 1:read), def. 0 */
#define GPIO_CPLD_RW (1<<9)
/* GPIO10 - O - SPI Chip Select for CODEC#0, def. 1 */
#define GPIO_SPI_CSN0 (1<<10)
/* GPIO11 - O - SPI Chip Select for CODEC#1, def. 1 */
#define GPIO_SPI_CSN1 (1<<11)
/* GPIO12 - O - Ex. Register Output Enable (0:enable, 1:disable), def. 1,
* init 0 */
#define GPIO_EX_GPIOE (1<<12)
/* GPIO13 - O - Ex. Register0 Chip Select for System Control Register,
* def. 1 */
#define GPIO_SCR (1<<13)
/* GPIO14 - O - Ex. Register1 Chip Select for Monitor Control Register,
* def. 1 */
#define GPIO_MCR (1<<14)
#define GPIO_SPI_ALL (GPIO_D4_SPI_CDTO | GPIO_D5_SPI_CCLK |\
GPIO_SPI_CSN0 | GPIO_SPI_CSN1)
#define GPIO_DATA_MASK (GPIO_D0 | GPIO_D1_JACKDTC0 | \
GPIO_D2_JACKDTC1 | GPIO_D3 | \
GPIO_D4_SPI_CDTO | GPIO_D5_SPI_CCLK | \
GPIO_D6_CD | GPIO_D7_DD)
/* System Control Register GPIO_SCR data bits */
/* Mic/Line select relay (0:line, 1:mic) */
#define SCR_RELAY GPIO_D0
/* Phantom power drive control (0:5V, 1:48V) */
#define SCR_PHP_V GPIO_D1_JACKDTC0
/* H/W mute control (0:Normal, 1:Mute) */
#define SCR_MUTE GPIO_D2_JACKDTC1
/* Phantom power control (0:Phantom on, 1:off) */
#define SCR_PHP GPIO_D3
/* Analog input 1/2 Source Select */
#define SCR_AIN12_SEL0 GPIO_D4_SPI_CDTO
#define SCR_AIN12_SEL1 GPIO_D5_SPI_CCLK
/* Analog input 3/4 Source Select (0:line, 1:hi-z) */
#define SCR_AIN34_SEL GPIO_D6_CD
/* Codec Power Down (0:power down, 1:normal) */
#define SCR_CODEC_PDN GPIO_D7_DD
#define SCR_AIN12_LINE (0)
#define SCR_AIN12_MIC (SCR_AIN12_SEL0)
#define SCR_AIN12_LOWCUT (SCR_AIN12_SEL1 | SCR_AIN12_SEL0)
/* Monitor Control Register GPIO_MCR data bits */
/* Input 1/2 to Monitor 1/2 (0:off, 1:on) */
#define MCR_IN12_MON12 GPIO_D0
/* Input 1/2 to Monitor 3/4 (0:off, 1:on) */
#define MCR_IN12_MON34 GPIO_D1_JACKDTC0
/* Input 3/4 to Monitor 1/2 (0:off, 1:on) */
#define MCR_IN34_MON12 GPIO_D2_JACKDTC1
/* Input 3/4 to Monitor 3/4 (0:off, 1:on) */
#define MCR_IN34_MON34 GPIO_D3
/* Output to Monitor 1/2 (0:off, 1:on) */
#define MCR_OUT34_MON12 GPIO_D4_SPI_CDTO
/* Output to Monitor 3/4 (0:off, 1:on) */
#define MCR_OUT12_MON34 GPIO_D5_SPI_CCLK
/* CPLD Register DATA bits */
/* Clock Rate Select */
#define CPLD_CKS0 GPIO_D0
#define CPLD_CKS1 GPIO_D1_JACKDTC0
#define CPLD_CKS2 GPIO_D2_JACKDTC1
/* Sync Source Select (0:Internal, 1:External) */
#define CPLD_SYNC_SEL GPIO_D3
/* Word Clock FS Select (0:FS, 1:256FS) */
#define CPLD_WORD_SEL GPIO_D4_SPI_CDTO
/* Coaxial Output Source (IS-Link) (0:SPDIF, 1:I2S) */
#define CPLD_COAX_OUT GPIO_D5_SPI_CCLK
/* Input 1/2 Source Select (0:Analog12, 1:An34) */
#define CPLD_IN12_SEL GPIO_D6_CD
/* Input 3/4 Source Select (0:Analog34, 1:Digital In) */
#define CPLD_IN34_SEL GPIO_D7_DD
/* internal clock (CPLD_SYNC_SEL = 0) options */
#define CPLD_CKS_44100HZ (0)
#define CPLD_CKS_48000HZ (CPLD_CKS0)
#define CPLD_CKS_88200HZ (CPLD_CKS1)
#define CPLD_CKS_96000HZ (CPLD_CKS1 | CPLD_CKS0)
#define CPLD_CKS_176400HZ (CPLD_CKS2)
#define CPLD_CKS_192000HZ (CPLD_CKS2 | CPLD_CKS0)
#define CPLD_CKS_MASK (CPLD_CKS0 | CPLD_CKS1 | CPLD_CKS2)
/* external clock (CPLD_SYNC_SEL = 1) options */
/* external clock - SPDIF */
#define CPLD_EXT_SPDIF (0 | CPLD_SYNC_SEL)
/* external clock - WordClock 1xfs */
#define CPLD_EXT_WORDCLOCK_1FS (CPLD_CKS1 | CPLD_SYNC_SEL)
/* external clock - WordClock 256xfs */
#define CPLD_EXT_WORDCLOCK_256FS (CPLD_CKS1 | CPLD_WORD_SEL |\
CPLD_SYNC_SEL)
#define EXT_SPDIF_TYPE 0
#define EXT_WORDCLOCK_1FS_TYPE 1
#define EXT_WORDCLOCK_256FS_TYPE 2
#define AK4620_DFS0 (1<<0)
#define AK4620_DFS1 (1<<1)
#define AK4620_CKS0 (1<<2)
#define AK4620_CKS1 (1<<3)
/* Clock and Format Control register */
#define AK4620_DFS_REG 0x02
/* Deem and Volume Control register */
#define AK4620_DEEMVOL_REG 0x03
#define AK4620_SMUTE (1<<7)
/*
 * Conversion from int value to its binary form. Used for debugging.
 * The output buffer must be allocated prior to calling the function.
 *
 * Renders the 32 bits of @value MSB-first as '0'/'1' characters, with a
 * space between each group of eight bits; @buffer needs >= 36 bytes.
 */
static char *get_binary(char *buffer, int value)
{
	int bit, pos = 0;

	for (bit = 31; bit >= 0; bit--) {
		buffer[pos++] = (value & (1 << bit)) ? '1' : '0';
		/* separate the four octets with a single space */
		if (bit && !(bit % 8))
			buffer[pos++] = ' ';
	}
	buffer[pos] = '\0';
	return buffer;
}
/*
 * Initial setup of the conversion array GPIO <-> rate
 */
/* supported internal-clock sample rates, parallel to cks_vals below */
static unsigned int qtet_rates[] = {
	44100, 48000, 88200,
	96000, 176400, 192000,
};

/* CPLD clock-select bit patterns, one per entry of qtet_rates */
static unsigned int cks_vals[] = {
	CPLD_CKS_44100HZ, CPLD_CKS_48000HZ, CPLD_CKS_88200HZ,
	CPLD_CKS_96000HZ, CPLD_CKS_176400HZ, CPLD_CKS_192000HZ,
};

/* PCM rate constraint exposing only the rates above */
static struct snd_pcm_hw_constraint_list qtet_rates_info = {
	.count = ARRAY_SIZE(qtet_rates),
	.list = qtet_rates,
	.mask = 0,
};
/* register write callback for the AK4113 S/PDIF receiver (via I2C) */
static void qtet_ak4113_write(void *private_data, unsigned char reg,
		unsigned char val)
{
	struct snd_ice1712 *ice = private_data;

	snd_vt1724_write_i2c(ice, AK4113_ADDR, reg, val);
}
/* register read callback for the AK4113 S/PDIF receiver (via I2C) */
static unsigned char qtet_ak4113_read(void *private_data, unsigned char reg)
{
	struct snd_ice1712 *ice = private_data;

	return snd_vt1724_read_i2c(ice, AK4113_ADDR, reg);
}
/*
 * AK4620 section
 */
/*
 * Write data to addr register of ak4620
 *
 * Bit-bangs a 16-bit SPI frame (chip address + R/W + register address +
 * data) over the GPIO pins.  The statement order and udelay() pacing
 * follow the chip's SPI timing and must not be reordered.
 */
static void qtet_akm_write(struct snd_akm4xxx *ak, int chip,
		unsigned char addr, unsigned char data)
{
	unsigned int tmp, orig_dir;
	int idx;
	unsigned int addrdata;
	struct snd_ice1712 *ice = ak->private_data[0];

	if (snd_BUG_ON(chip < 0 || chip >= 4))
		return;
	/*printk(KERN_DEBUG "Writing to AK4620: chip=%d, addr=0x%x,
	  data=0x%x\n", chip, addr, data);*/
	orig_dir = ice->gpio.get_dir(ice);
	ice->gpio.set_dir(ice, orig_dir | GPIO_SPI_ALL);
	/* set mask - only SPI bits */
	ice->gpio.set_mask(ice, ~GPIO_SPI_ALL);

	tmp = ice->gpio.get_data(ice);
	/* high all */
	tmp |= GPIO_SPI_ALL;
	ice->gpio.set_data(ice, tmp);
	udelay(100);
	/* drop chip select */
	if (chip)
		/* CODEC 1 */
		tmp &= ~GPIO_SPI_CSN1;
	else
		tmp &= ~GPIO_SPI_CSN0;
	ice->gpio.set_data(ice, tmp);
	udelay(100);

	/* build I2C address + data byte */
	addrdata = (AK4620_ADDR << 6) | 0x20 | (addr & 0x1f);
	addrdata = (addrdata << 8) | data;
	/* clock out the 16 bits MSB first: data is latched on the
	 * rising clock edge */
	for (idx = 15; idx >= 0; idx--) {
		/* drop clock */
		tmp &= ~GPIO_D5_SPI_CCLK;
		ice->gpio.set_data(ice, tmp);
		udelay(100);
		/* set data */
		if (addrdata & (1 << idx))
			tmp |= GPIO_D4_SPI_CDTO;
		else
			tmp &= ~GPIO_D4_SPI_CDTO;
		ice->gpio.set_data(ice, tmp);
		udelay(100);
		/* raise clock */
		tmp |= GPIO_D5_SPI_CCLK;
		ice->gpio.set_data(ice, tmp);
		udelay(100);
	}
	/* all back to 1 */
	tmp |= GPIO_SPI_ALL;
	ice->gpio.set_data(ice, tmp);
	udelay(100);
	/* return all gpios to non-writable */
	ice->gpio.set_mask(ice, 0xffffff);
	/* restore GPIOs direction */
	ice->gpio.set_dir(ice, orig_dir);
}
/*
 * Read-modify-write: on every codec chip, clear @mask in register
 * @addr and set the bits of @value.
 */
static void qtet_akm_set_regs(struct snd_akm4xxx *ak, unsigned char addr,
		unsigned char mask, unsigned char value)
{
	int chip;

	for (chip = 0; chip < ak->num_chips; chip++) {
		unsigned char reg = snd_akm4xxx_get(ak, chip, addr);

		reg = (reg & ~mask) | value;
		snd_akm4xxx_write(ak, chip, addr, reg);
	}
}
/*
 * change the rate of AK4620
 */
static void qtet_akm_set_rate_val(struct snd_akm4xxx *ak, unsigned int rate)
{
	unsigned char dfs;

	/* no hint - S/PDIF input is master or the new spdif input rate
	 * undetected, simply return */
	if (!rate)
		return;

	/* adjust DFS on codecs - see datasheet */
	if (rate > 108000)
		dfs = AK4620_DFS1 | AK4620_CKS1;
	else if (rate > 54000)
		dfs = AK4620_DFS0 | AK4620_CKS0;
	else
		dfs = 0;

	/* set new value */
	qtet_akm_set_regs(ak, AK4620_DFS_REG, AK4620_DFS0 | AK4620_DFS1 |
			AK4620_CKS0 | AK4620_CKS1, dfs);
}
/* shorthand for a named AKM mixer control with a channel count */
#define AK_CONTROL(xname, xch)	{ .name = xname, .num_channels = xch }

#define PCM_12_PLAYBACK_VOLUME	"PCM 1/2 Playback Volume"
#define PCM_34_PLAYBACK_VOLUME	"PCM 3/4 Playback Volume"
#define PCM_12_CAPTURE_VOLUME	"PCM 1/2 Capture Volume"
#define PCM_34_CAPTURE_VOLUME	"PCM 3/4 Capture Volume"

/* DAC (playback) volume controls, one stereo pair each */
static const struct snd_akm4xxx_dac_channel qtet_dac[] = {
	AK_CONTROL(PCM_12_PLAYBACK_VOLUME, 2),
	AK_CONTROL(PCM_34_PLAYBACK_VOLUME, 2),
};

/* ADC (capture) volume controls, one stereo pair each */
static const struct snd_akm4xxx_adc_channel qtet_adc[] = {
	AK_CONTROL(PCM_12_CAPTURE_VOLUME, 2),
	AK_CONTROL(PCM_34_CAPTURE_VOLUME, 2),
};

/* AK4620 codec description: 4 DAC and 4 ADC channels over two chips */
static struct snd_akm4xxx akm_qtet_dac __devinitdata = {
	.type = SND_AK4620,
	.num_dacs = 4,	/* DAC1 - Output 12
	*/
	.num_adcs = 4,	/* ADC1 - Input 12
	*/
	.ops = {
		.write = qtet_akm_write,
		.set_rate_val = qtet_akm_set_rate_val,
	},
	.dac_info = qtet_dac,
	.adc_info = qtet_adc,
};
/* Communication routines with the CPLD */
/* Writes data to external register reg, both reg and data are
 * GPIO representations
 *
 * The register value is latched by pulsing the register-select GPIO
 * low and high while the data bits are held; the statement order and
 * udelay() pacing implement that strobe sequence and must be kept.
 */
static void reg_write(struct snd_ice1712 *ice, unsigned int reg,
		unsigned int data)
{
	unsigned int tmp;

	mutex_lock(&ice->gpio_mutex);
	/* set direction of used GPIOs*/
	/* all outputs */
	tmp = 0x00ffff;
	ice->gpio.set_dir(ice, tmp);
	/* mask - writable bits */
	ice->gpio.set_mask(ice, ~(tmp));
	/* write the data */
	tmp = ice->gpio.get_data(ice);
	tmp &= ~GPIO_DATA_MASK;
	tmp |= data;
	ice->gpio.set_data(ice, tmp);
	udelay(100);
	/* drop output enable */
	tmp &= ~GPIO_EX_GPIOE;
	ice->gpio.set_data(ice, tmp);
	udelay(100);
	/* drop the register gpio */
	tmp &= ~reg;
	ice->gpio.set_data(ice, tmp);
	udelay(100);
	/* raise the register GPIO */
	tmp |= reg;
	ice->gpio.set_data(ice, tmp);
	udelay(100);
	/* raise all data gpios */
	tmp |= GPIO_DATA_MASK;
	ice->gpio.set_data(ice, tmp);
	/* mask - immutable bits */
	ice->gpio.set_mask(ice, 0xffffff);
	/* outputs only 8-15 */
	ice->gpio.set_dir(ice, 0x00ff00);
	mutex_unlock(&ice->gpio_mutex);
}
/* Return the cached shadow of the System Control Register. */
static unsigned int get_scr(struct snd_ice1712 *ice)
{
	return ((struct qtet_spec *)ice->spec)->scr;
}

/* Return the cached shadow of the Monitor Control Register. */
static unsigned int get_mcr(struct snd_ice1712 *ice)
{
	return ((struct qtet_spec *)ice->spec)->mcr;
}

/* Return the cached shadow of the CPLD register. */
static unsigned int get_cpld(struct snd_ice1712 *ice)
{
	return ((struct qtet_spec *)ice->spec)->cpld;
}
/* Write the System Control Register and refresh its shadow copy. */
static void set_scr(struct snd_ice1712 *ice, unsigned int val)
{
	reg_write(ice, GPIO_SCR, val);
	((struct qtet_spec *)ice->spec)->scr = val;
}

/* Write the Monitor Control Register and refresh its shadow copy. */
static void set_mcr(struct snd_ice1712 *ice, unsigned int val)
{
	reg_write(ice, GPIO_MCR, val);
	((struct qtet_spec *)ice->spec)->mcr = val;
}

/* Write the CPLD register and refresh its shadow copy. */
static void set_cpld(struct snd_ice1712 *ice, unsigned int val)
{
	reg_write(ice, GPIO_CPLD_CSN, val);
	((struct qtet_spec *)ice->spec)->cpld = val;
}
#ifdef CONFIG_PROC_FS
static void proc_regs_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_ice1712 *ice = entry->private_data;
char bin_buffer[36];
snd_iprintf(buffer, "SCR: %s\n", get_binary(bin_buffer,
get_scr(ice)));
snd_iprintf(buffer, "MCR: %s\n", get_binary(bin_buffer,
get_mcr(ice)));
snd_iprintf(buffer, "CPLD: %s\n", get_binary(bin_buffer,
get_cpld(ice)));
}
/* Register a read-only "quartet" proc file showing the registers. */
static void proc_init(struct snd_ice1712 *ice)
{
	struct snd_info_entry *entry;

	if (snd_card_proc_new(ice->card, "quartet", &entry) == 0)
		snd_info_set_text_ops(entry, ice, proc_regs_read);
}
#else /* !CONFIG_PROC_FS */
static void proc_init(struct snd_ice1712 *ice) {}
#endif
/*
 * Get callback for "Master Playback Switch": SCR_MUTE set means the
 * outputs are muted, so the switch reports the inverted state.
 */
static int qtet_mute_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] =
		(get_scr(ice) & SCR_MUTE) ? 0 : 1;
	return 0;
}
/*
 * Put callback for "Master Playback Switch": updates the SCR mute bit
 * and mirrors the state in the AK4620 soft-mute register.
 * Returns 1 when the state changed, 0 otherwise.
 */
static int qtet_mute_put(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int cur, want, smute;

	cur = get_scr(ice) & SCR_MUTE;
	if (ucontrol->value.integer.value[0]) {
		/* playback on: clear hardware mute and DAC soft-mute */
		want = 0;
		smute = 0;
	} else {
		/* playback off: set hardware mute and DAC soft-mute */
		want = SCR_MUTE;
		smute = AK4620_SMUTE;
	}
	if (cur == want)
		return 0;	/* no change */

	set_scr(ice, (get_scr(ice) & ~SCR_MUTE) | want);
	/* keep the codec's soft-mute in sync with the SCR mute bit */
	qtet_akm_set_regs(ice->akm, AK4620_DEEMVOL_REG, AK4620_SMUTE, smute);
	return 1;
}
/*
 * Info callback for the Analog In 1/2 source selector: three fixed
 * settings (line input, microphone, microphone with low-cut filter).
 */
static int qtet_ain12_enum_info(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_info *uinfo)
{
	/* read-only table - const so it can live in rodata */
	static const char * const texts[3] = {
		"Line In 1/2", "Mic", "Mic + Low-cut"
	};

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = ARRAY_SIZE(texts);
	/* clamp the requested item into the valid range */
	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
		uinfo->value.enumerated.item =
			uinfo->value.enumerated.items - 1;
	strcpy(uinfo->value.enumerated.name,
			texts[uinfo->value.enumerated.item]);
	return 0;
}
/*
 * Get callback for the Analog In 1/2 source selector: translate the
 * two SCR selector bits back to the enum index (0=line, 1=mic,
 * 2=mic+low-cut).
 */
static int qtet_ain12_sw_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int sel;
	int idx;

	sel = get_scr(ice) & (SCR_AIN12_SEL1 | SCR_AIN12_SEL0);
	switch (sel) {
	case SCR_AIN12_LINE:
		idx = 0;
		break;
	case SCR_AIN12_MIC:
		idx = 1;
		break;
	case SCR_AIN12_LOWCUT:
		idx = 2;
		break;
	default:
		/* BUG - no other combinations allowed */
		snd_BUG();
		idx = 0;
	}
	ucontrol->value.integer.value[0] = idx;
	return 0;
}
/*
 * Put callback for the Analog In 1/2 source selector.  The intermediate
 * set_scr() calls are deliberate: when switching to a mic setting the
 * input relay is energized before the selector bits change, and when
 * switching back to line the selector is dropped before the relay opens,
 * keeping the hardware in a safe state during the transition.
 * Returns 1 when the selection changed, 0 otherwise.
 */
static int qtet_ain12_sw_put(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int old, new, tmp, masked_old;
	old = new = get_scr(ice);
	masked_old = old & (SCR_AIN12_SEL1 | SCR_AIN12_SEL0);
	tmp = ucontrol->value.integer.value[0];
	if (tmp == 2)
		tmp = 3; /* binary 10 is not supported */
	tmp <<= 4; /* shifting to SCR_AIN12_SEL0 */
	if (tmp != masked_old) {
		/* change requested */
		switch (tmp) {
		case SCR_AIN12_LINE:
			/* drop selector bits first, then open the relay */
			new = old & ~(SCR_AIN12_SEL1 | SCR_AIN12_SEL0);
			set_scr(ice, new);
			/* turn off relay */
			new &= ~SCR_RELAY;
			set_scr(ice, new);
			break;
		case SCR_AIN12_MIC:
			/* turn on relay */
			new = old | SCR_RELAY;
			set_scr(ice, new);
			new = (new & ~SCR_AIN12_SEL1) | SCR_AIN12_SEL0;
			set_scr(ice, new);
			break;
		case SCR_AIN12_LOWCUT:
			/* turn on relay */
			new = old | SCR_RELAY;
			set_scr(ice, new);
			new |= SCR_AIN12_SEL1 | SCR_AIN12_SEL0;
			set_scr(ice, new);
			break;
		default:
			/* enum info limits the value to 0..2 */
			snd_BUG();
		}
		return 1;
	}
	/* no change */
	return 0;
}
/*
 * Get callback for "Phantom Power": SCR_PHP_V set means the 48V
 * phantom supply is active.
 */
static int qtet_php_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);

	/* if phantom voltage =48V, phantom on */
	ucontrol->value.integer.value[0] =
		(get_scr(ice) & SCR_PHP_V) ? 1 : 0;
	return 0;
}
/*
 * Put callback for "Phantom Power".  The two-step sequences are
 * deliberate: when enabling, the 48V supply is raised before the
 * phantom switch closes; when disabling, the supply is dropped before
 * the switch opens.  Note the inverted sense of SCR_PHP (0 = phantom
 * on) versus SCR_PHP_V (1 = 48V).
 * Returns 1 when the state changed, 0 otherwise.
 */
static int qtet_php_put(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int old, new;
	old = new = get_scr(ice);
	if (ucontrol->value.integer.value[0] /* phantom on requested */
			&& (~old & SCR_PHP_V)) /* 0 = voltage 5V */ {
		/* is off, turn on */
		/* turn voltage on first, = 1 */
		new = old | SCR_PHP_V;
		set_scr(ice, new);
		/* turn phantom on, = 0 */
		new &= ~SCR_PHP;
		set_scr(ice, new);
	} else if (!ucontrol->value.integer.value[0] && (old & SCR_PHP_V)) {
		/* phantom off requested and 1 = voltage 48V */
		/* is on, turn off */
		/* turn voltage off first, = 0 */
		new = old & ~SCR_PHP_V;
		set_scr(ice, new);
		/* turn phantom off, = 1 */
		new |= SCR_PHP;
		set_scr(ice, new);
	}
	if (old != new)
		return 1;
	/* no change */
	return 0;
}
/*
 * Table of per-control private data for the generic switch/enum
 * callbacks below.  Each entry names the bit the control toggles and
 * the accessor pair (get_/set_scr, _mcr or _cpld) for the register
 * holding it; kcontrol->private_value indexes this table.
 */
/* boolean switch entry: bit + register accessors */
#define PRIV_SW(xid, xbit, xreg) [xid] = {.bit = xbit,\
	.set_register = set_##xreg,\
	.get_register = get_##xreg, }
/* two-item enum entry: bit + register accessors + the two item texts */
#define PRIV_ENUM2(xid, xbit, xreg, xtext1, xtext2) [xid] = {.bit = xbit,\
	.set_register = set_##xreg,\
	.get_register = get_##xreg,\
	.texts = {xtext1, xtext2} }
static struct qtet_kcontrol_private qtet_privates[] = {
	PRIV_ENUM2(IN12_SEL, CPLD_IN12_SEL, cpld, "An In 1/2", "An In 3/4"),
	PRIV_ENUM2(IN34_SEL, CPLD_IN34_SEL, cpld, "An In 3/4", "IEC958 In"),
	PRIV_ENUM2(AIN34_SEL, SCR_AIN34_SEL, scr, "Line In 3/4", "Hi-Z"),
	PRIV_ENUM2(COAX_OUT, CPLD_COAX_OUT, cpld, "IEC958", "I2S"),
	PRIV_SW(IN12_MON12, MCR_IN12_MON12, mcr),
	PRIV_SW(IN12_MON34, MCR_IN12_MON34, mcr),
	PRIV_SW(IN34_MON12, MCR_IN34_MON12, mcr),
	PRIV_SW(IN34_MON34, MCR_IN34_MON34, mcr),
	PRIV_SW(OUT12_MON34, MCR_OUT12_MON34, mcr),
	PRIV_SW(OUT34_MON12, MCR_OUT34_MON12, mcr),
};
static int qtet_enum_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct qtet_kcontrol_private private =
qtet_privates[kcontrol->private_value];
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = ARRAY_SIZE(private.texts);
if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
uinfo->value.enumerated.item =
uinfo->value.enumerated.items - 1;
strcpy(uinfo->value.enumerated.name,
private.texts[uinfo->value.enumerated.item]);
return 0;
}
static int qtet_sw_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct qtet_kcontrol_private private =
qtet_privates[kcontrol->private_value];
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
ucontrol->value.integer.value[0] =
(private.get_register(ice) & private.bit) ? 1 : 0;
return 0;
}
static int qtet_sw_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct qtet_kcontrol_private private =
qtet_privates[kcontrol->private_value];
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned int old, new;
old = private.get_register(ice);
if (ucontrol->value.integer.value[0])
new = old | private.bit;
else
new = old & ~private.bit;
if (old != new) {
private.set_register(ice, new);
return 1;
}
/* no change */
return 0;
}
/* boolean switches share the standard mono on/off info callback */
#define qtet_sw_info snd_ctl_boolean_mono_info
/*
 * Shorthand for a mixer control backed by the generic sw/enum
 * callbacks; xpriv indexes qtet_privates[].
 */
#define QTET_CONTROL(xname, xtype, xpriv) \
	{.iface = SNDRV_CTL_ELEM_IFACE_MIXER,\
	.name = xname,\
	.info = qtet_##xtype##_info,\
	.get = qtet_sw_get,\
	.put = qtet_sw_put,\
	.private_value = xpriv }
/* Card-specific mixer controls for the Infrasonic Quartet. */
static struct snd_kcontrol_new qtet_controls[] __devinitdata = {
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Master Playback Switch",
		.info = qtet_sw_info,
		.get = qtet_mute_get,
		.put = qtet_mute_put,
		.private_value = 0
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Phantom Power",
		.info = qtet_sw_info,
		.get = qtet_php_get,
		.put = qtet_php_put,
		.private_value = 0
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Analog In 1/2 Capture Switch",
		.info = qtet_ain12_enum_info,
		.get = qtet_ain12_sw_get,
		.put = qtet_ain12_sw_put,
		.private_value = 0
	},
	QTET_CONTROL("Analog In 3/4 Capture Switch", enum, AIN34_SEL),
	QTET_CONTROL("PCM In 1/2 Capture Switch", enum, IN12_SEL),
	QTET_CONTROL("PCM In 3/4 Capture Switch", enum, IN34_SEL),
	QTET_CONTROL("Coax Output Source", enum, COAX_OUT),
	QTET_CONTROL("Analog In 1/2 to Monitor 1/2", sw, IN12_MON12),
	QTET_CONTROL("Analog In 1/2 to Monitor 3/4", sw, IN12_MON34),
	QTET_CONTROL("Analog In 3/4 to Monitor 1/2", sw, IN34_MON12),
	QTET_CONTROL("Analog In 3/4 to Monitor 3/4", sw, IN34_MON34),
	QTET_CONTROL("Output 1/2 to Monitor 3/4", sw, OUT12_MON34),
	QTET_CONTROL("Output 3/4 to Monitor 1/2", sw, OUT34_MON12),
};
/* NULL-terminated list of volume controls slaved to the virtual master. */
static char *slave_vols[] __devinitdata = {
	PCM_12_PLAYBACK_VOLUME,
	PCM_34_PLAYBACK_VOLUME,
	NULL
};
/* dB scale for the virtual master: -63.50dB..0dB in 0.50dB steps, mutable */
static __devinitdata
DECLARE_TLV_DB_SCALE(qtet_master_db_scale, -6350, 50, 1);
/*
 * Look up a mixer control on @card by name.
 * Returns the control, or NULL if no control with that name exists.
 */
static struct snd_kcontrol __devinit *ctl_find(struct snd_card *card,
		const char *name)
{
	struct snd_ctl_elem_id sid;
	memset(&sid, 0, sizeof(sid));
	/* bounded copy - sid.name is a fixed-size array (resolves FIXME) */
	strlcpy(sid.name, name, sizeof(sid.name));
	sid.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	return snd_ctl_find_id(card, &sid);
}
/*
 * Attach every control named in the NULL-terminated @list that exists
 * on @card as a slave of @master; missing names are silently skipped.
 */
static void __devinit add_slaves(struct snd_card *card,
		struct snd_kcontrol *master, char **list)
{
	char **name;

	for (name = list; *name != NULL; name++) {
		struct snd_kcontrol *slave = ctl_find(card, *name);

		if (slave != NULL)
			snd_ctl_add_slave(master, slave);
	}
}
/*
 * Build all mixer controls: the AK4620 codec controls, the
 * card-specific controls from qtet_controls[], a virtual master volume
 * slaving the PCM volumes, and the AK4113 SPDIF capture controls.
 * Returns 0 on success or a negative error code.
 */
static int __devinit qtet_add_controls(struct snd_ice1712 *ice)
{
	struct qtet_spec *spec = ice->spec;
	int err, i;
	struct snd_kcontrol *vmaster;

	err = snd_ice1712_akm4xxx_build_controls(ice);
	if (err < 0)
		return err;
	for (i = 0; i < ARRAY_SIZE(qtet_controls); i++) {
		err = snd_ctl_add(ice->card,
				snd_ctl_new1(&qtet_controls[i], ice));
		if (err < 0)
			return err;
	}
	/* Create virtual master control */
	vmaster = snd_ctl_make_virtual_master("Master Playback Volume",
			qtet_master_db_scale);
	if (!vmaster)
		return -ENOMEM;
	add_slaves(ice->card, vmaster, slave_vols);
	err = snd_ctl_add(ice->card, vmaster);
	if (err < 0)
		return err;
	/* only capture SPDIF over AK4113 */
	err = snd_ak4113_build(spec->ak4113,
			ice->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream);
	if (err < 0)
		return err;
	return 0;
}
static inline int qtet_is_spdif_master(struct snd_ice1712 *ice)
{
	/* CPLD_SYNC_SEL: 0 = internal, 1 = external (i.e. spdif master) */
	return !!(get_cpld(ice) & CPLD_SYNC_SEL);
}
/*
 * Translate the CPLD clock-select bits back to a sample rate via the
 * cks_vals/qtet_rates lookup tables; 0 when the setting is unknown.
 */
static unsigned int qtet_get_rate(struct snd_ice1712 *ice)
{
	unsigned char cks = get_cpld(ice) & CPLD_CKS_MASK;
	int i;

	for (i = 0; i < ARRAY_SIZE(cks_vals); i++)
		if (cks_vals[i] == cks)
			return qtet_rates[i];
	return 0;
}
/*
 * Reverse lookup: sample rate -> CPLD clock-select bits; 0 when the
 * rate is not in the qtet_rates table.
 */
static int get_cks_val(int rate)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(qtet_rates); ++idx)
		if (rate == qtet_rates[idx])
			return cks_vals[idx];
	return 0;
}
/*
 * Program a new sample rate: first force the ice1724 to take its rate
 * from the external circuitry (SPDIF master bit), then rewrite the
 * CPLD clock-select bits for @rate with external sync dropped so the
 * internal clock source drives it.
 */
static void qtet_set_rate(struct snd_ice1712 *ice, unsigned int rate)
{
	unsigned int new;
	unsigned char val;
	/* switching ice1724 to external clock - supplied by ext. circuits */
	val = inb(ICEMT1724(ice, RATE));
	outb(val | VT1724_SPDIF_MASTER, ICEMT1724(ice, RATE));
	new = (get_cpld(ice) & ~CPLD_CKS_MASK) | get_cks_val(rate);
	/* switch to internal clock, drop CPLD_SYNC_SEL */
	new &= ~CPLD_SYNC_SEL;
	/* printk(KERN_DEBUG "QT - set_rate: old %x, new %x\n",
	   get_cpld(ice), new); */
	set_cpld(ice, new);
}
/* The master clock is fixed on the Quartet - nothing to reprogram. */
static inline unsigned char qtet_set_mclk(struct snd_ice1712 *ice,
		unsigned int rate)
{
	/* no change in master clock */
	return 0;
}
/*
 * Select an external clock source (SPDIF or word clock) in the CPLD.
 * Returns 1 when the register was rewritten, 0 when already configured.
 */
static int qtet_set_spdif_clock(struct snd_ice1712 *ice, int type)
{
	unsigned int cur, next;

	cur = get_cpld(ice);
	/* start from cleared clock-select and word-clock bits */
	next = cur & ~(CPLD_CKS_MASK | CPLD_WORD_SEL);
	switch (type) {
	case EXT_SPDIF_TYPE:
		next |= CPLD_EXT_SPDIF;
		break;
	case EXT_WORDCLOCK_1FS_TYPE:
		next |= CPLD_EXT_WORDCLOCK_1FS;
		break;
	case EXT_WORDCLOCK_256FS_TYPE:
		next |= CPLD_EXT_WORDCLOCK_256FS;
		break;
	default:
		snd_BUG();
	}
	if (next == cur)
		return 0;
	set_cpld(ice, next);
	return 1;	/* changed */
}
/*
 * Decode the CPLD clock bits into an EXT_*_TYPE constant.
 * Returns -1 when clocked internally (no external type), or 0 for an
 * undefined bit combination (with snd_BUG()).
 */
static int qtet_get_spdif_master_type(struct snd_ice1712 *ice)
{
	unsigned int bits;

	/* only the rate/clock-related bits matter here */
	bits = get_cpld(ice) &
		(CPLD_CKS_MASK | CPLD_WORD_SEL | CPLD_SYNC_SEL);
	if (!(bits & CPLD_SYNC_SEL))
		return -1;	/* internal clock, not any external type */

	switch (bits) {
	case (CPLD_EXT_SPDIF):
		return EXT_SPDIF_TYPE;
	case (CPLD_EXT_WORDCLOCK_1FS):
		return EXT_WORDCLOCK_1FS_TYPE;
	case (CPLD_EXT_WORDCLOCK_256FS):
		return EXT_WORDCLOCK_256FS_TYPE;
	default:
		/* undefined combination of external clock setup */
		snd_BUG();
		return 0;
	}
}
/*
 * Called when ak4113 detects change in the input SPDIF stream.
 * @c0/@c1 are the changed-bit masks of the two receiver status bytes;
 * a non-zero @c1 indicates the sample-rate status changed.
 */
static void qtet_ak4113_change(struct ak4113 *ak4113, unsigned char c0,
		unsigned char c1)
{
	struct snd_ice1712 *ice = ak4113->change_callback_private;
	int rate;
	if ((qtet_get_spdif_master_type(ice) == EXT_SPDIF_TYPE) &&
			c1) {
		/* only for SPDIF master mode, rate was changed */
		rate = snd_ak4113_external_rate(ak4113);
		/* printk(KERN_DEBUG "ak4113 - input rate changed to %d\n",
		   rate); */
		/* retune the AKM codecs to the newly detected rate */
		qtet_akm_set_rate_val(ice->akm, rate);
	}
}
/*
* If clock slaved to SPDIF-IN, setting runtime rate
* to the detected external rate
*/
/*
 * PCM open hook: when clocked from external SPDIF, pin the runtime
 * rate range to the rate detected by the AK4113 receiver.
 */
static void qtet_spdif_in_open(struct snd_ice1712 *ice,
		struct snd_pcm_substream *substream)
{
	struct qtet_spec *spec = ice->spec;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int rate;

	/* only the external-SPDIF clock source reports an input rate */
	if (qtet_get_spdif_master_type(ice) != EXT_SPDIF_TYPE)
		return;

	rate = snd_ak4113_external_rate(spec->ak4113);
	/* restrict the runtime only when the detected rate is in range */
	if (rate >= runtime->hw.rate_min && rate <= runtime->hw.rate_max)
		runtime->hw.rate_min = runtime->hw.rate_max = rate;
}
/*
* initialize the chip
*/
/*
 * Chip init for the Infrasonic Quartet: install the card callbacks,
 * power up the codec via the SCR, initialize the AKM DAC pair and the
 * AK4113 SPDIF receiver, and set a default rate of 44.1kHz.
 * NOTE(review): the error paths after the kzalloc/kcalloc do not free
 * spec/ak here - presumably the card-level teardown releases them via
 * ice->spec/ice->akm; confirm before changing.
 */
static int __devinit qtet_init(struct snd_ice1712 *ice)
{
	/* initial register values for the AK4113, indexed by register */
	static const unsigned char ak4113_init_vals[] = {
		/* AK4113_REG_PWRDN */	AK4113_RST | AK4113_PWN |
			AK4113_OCKS0 | AK4113_OCKS1,
		/* AK4113_REQ_FORMAT */	AK4113_DIF_I24I2S | AK4113_VTX |
			AK4113_DEM_OFF | AK4113_DEAU,
		/* AK4113_REG_IO0 */	AK4113_OPS2 | AK4113_TXE |
			AK4113_XTL_24_576M,
		/* AK4113_REG_IO1 */	AK4113_EFH_1024LRCLK | AK4113_IPS(0),
		/* AK4113_REG_INT0_MASK */	0,
		/* AK4113_REG_INT1_MASK */	0,
		/* AK4113_REG_DATDTS */	0,
	};
	int err;
	struct qtet_spec *spec;
	struct snd_akm4xxx *ak;
	unsigned char val;
	/* switching ice1724 to external clock - supplied by ext. circuits */
	val = inb(ICEMT1724(ice, RATE));
	outb(val | VT1724_SPDIF_MASTER, ICEMT1724(ice, RATE));
	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	/* qtet is clocked by Xilinx array */
	ice->hw_rates = &qtet_rates_info;
	ice->is_spdif_master = qtet_is_spdif_master;
	ice->get_rate = qtet_get_rate;
	ice->set_rate = qtet_set_rate;
	ice->set_mclk = qtet_set_mclk;
	ice->set_spdif_clock = qtet_set_spdif_clock;
	ice->get_spdif_master_type = qtet_get_spdif_master_type;
	ice->ext_clock_names = ext_clock_names;
	ice->ext_clock_count = ARRAY_SIZE(ext_clock_names);
	/* since Qtet can detect correct SPDIF-in rate, all streams can be
	 * limited to this specific rate */
	ice->spdif.ops.open = ice->pro_open = qtet_spdif_in_open;
	ice->spec = spec;
	/* Mute Off */
	/* SCR Initialize*/
	/* keep codec power down first */
	set_scr(ice, SCR_PHP);
	udelay(1);
	/* codec power up */
	set_scr(ice, SCR_PHP | SCR_CODEC_PDN);
	/* MCR Initialize */
	set_mcr(ice, 0);
	/* CPLD Initialize */
	set_cpld(ice, 0);
	ice->num_total_dacs = 2;
	ice->num_total_adcs = 2;
	/* one snd_akm4xxx descriptor pair for the DAC/ADC chips */
	ice->akm = kcalloc(2, sizeof(struct snd_akm4xxx), GFP_KERNEL);
	ak = ice->akm;
	if (!ak)
		return -ENOMEM;
	/* only one codec with two chips */
	ice->akm_codecs = 1;
	err = snd_ice1712_akm4xxx_init(ak, &akm_qtet_dac, NULL, ice);
	if (err < 0)
		return err;
	err = snd_ak4113_create(ice->card,
			qtet_ak4113_read,
			qtet_ak4113_write,
			ak4113_init_vals,
			ice, &spec->ak4113);
	if (err < 0)
		return err;
	/* callback for codecs rate setting */
	spec->ak4113->change_callback = qtet_ak4113_change;
	spec->ak4113->change_callback_private = ice;
	/* AK41143 in Quartet can detect external rate correctly
	 * (i.e. check_flags = 0) */
	spec->ak4113->check_flags = 0;
	proc_init(ice);
	qtet_set_rate(ice, 44100);
	return 0;
}
/* Default EEPROM image used when the card carries no valid EEPROM. */
static unsigned char qtet_eeprom[] __devinitdata = {
	[ICE_EEP2_SYSCONF] = 0x28,	/* clock 256(24MHz), mpu401, 1xADC,
					   1xDACs, SPDIF in */
	[ICE_EEP2_ACLINK] = 0x80,	/* I2S */
	[ICE_EEP2_I2S] = 0x78,		/* 96k, 24bit, 192k */
	[ICE_EEP2_SPDIF] = 0xc3,	/* out-en, out-int, in, out-ext */
	[ICE_EEP2_GPIO_DIR] = 0x00,	/* 0-7 inputs, switched to output
					   only during output operations */
	[ICE_EEP2_GPIO_DIR1] = 0xff,	/* 8-15 outputs */
	[ICE_EEP2_GPIO_DIR2] = 0x00,
	[ICE_EEP2_GPIO_MASK] = 0xff,	/* changed only for OUT operations */
	[ICE_EEP2_GPIO_MASK1] = 0x00,
	[ICE_EEP2_GPIO_MASK2] = 0xff,
	[ICE_EEP2_GPIO_STATE] = 0x00,	/* inputs */
	[ICE_EEP2_GPIO_STATE1] = 0x7d,	/* all 1, but GPIO_CPLD_RW
					   and GPIO15 always zero */
	[ICE_EEP2_GPIO_STATE2] = 0x00,	/* inputs */
};
/* entry point */
/* Card table consumed by the vt1724 core driver (terminated entry). */
struct snd_ice1712_card_info snd_vt1724_qtet_cards[] __devinitdata = {
	{
		.subvendor = VT1724_SUBDEVICE_QTET,
		.name = "Infrasonic Quartet",
		.model = "quartet",
		.chip_init = qtet_init,
		.build_controls = qtet_add_controls,
		.eeprom_size = sizeof(qtet_eeprom),
		.eeprom_data = qtet_eeprom,
	},
	{ } /* terminator */
};
| gpl-2.0 |
fishears/LG-V500-Kernel | arch/mips/pci/pci-bcm1480ht.c | 9281 | 5859 | /*
* Copyright (C) 2001,2002,2005 Broadcom Corporation
* Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
* BCM1480/1455-specific HT support (looking like PCI)
*
* This module provides the glue between Linux's PCI subsystem
* and the hardware. We basically provide glue for accessing
* configuration space, and set up the translation for I/O
* space accesses.
*
* To access configuration space, we use ioremap. In the 32-bit
* kernel, this consumes either 4 or 8 page table pages, and 16MB of
* kernel mapped memory. Hopefully neither of these should be a huge
* problem.
*
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_scd.h>
#include <asm/sibyte/board.h>
#include <asm/io.h>
/*
* Macros for calculating offsets into config space given a device
* structure or dev/fun/reg
*/
/* offset into config space for (bus number, devfn, register) */
#define CFGOFFSET(bus, devfn, where) (((bus)<<16)+((devfn)<<8)+(where))
/* same, taking a struct pci_bus */
#define CFGADDR(bus, devfn, where) CFGOFFSET((bus)->number, (devfn), where)
/* kernel mapping of the 16MB HT configuration space (set at init) */
static void *ht_cfg_space;
#define PCI_BUS_ENABLED 1
#define PCI_DEVICE_MODE 2
/* PCI_BUS_ENABLED/PCI_DEVICE_MODE flags describing the bridge state */
static int bcm1480ht_bus_status;
#define PCI_BRIDGE_DEVICE 0
#define HT_BRIDGE_DEVICE 1
/*
 * HT's level-sensitive interrupts require EOI, which is generated
 * through a 4MB memory-mapped region
 */
unsigned long ht_eoi_space;
/*
* Read/write 32-bit values in config space.
*/
/* Read the aligned 32-bit config word containing @addr. */
static inline u32 READCFG32(u32 addr)
{
	u32 *p = (u32 *)(ht_cfg_space + (addr & ~3));

	return *p;
}
/* Write the aligned 32-bit config word containing @addr. */
static inline void WRITECFG32(u32 addr, u32 data)
{
	u32 *p = (u32 *)(ht_cfg_space + (addr & ~3));

	*p = data;
}
/*
* Some checks before doing config cycles:
* In PCI Device Mode, hide everything on bus 0 except the LDT host
* bridge. Otherwise, access is controlled by bridge MasterEn bits.
*/
static int bcm1480ht_can_access(struct pci_bus *bus, int devfn)
{
	if (!(bcm1480ht_bus_status & (PCI_BUS_ENABLED | PCI_DEVICE_MODE)))
		return 0;

	/*
	 * In PCI device mode nothing on bus 0 may be accessed; the slot
	 * number is irrelevant (the unused PCI_SLOT() dead store that
	 * used to sit here has been dropped).
	 */
	if (bus->number == 0 && (bcm1480ht_bus_status & PCI_DEVICE_MODE))
		return 0;

	return 1;
}
/*
* Read/write access functions for various sizes of values
* in config space. Return all 1's for disallowed accesses
* for a kludgy but adequate simulation of master aborts.
*/
static int bcm1480ht_pcibios_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 * val)
{
	u32 data;

	/* reject misaligned 16/32-bit accesses */
	if (((size == 2) && (where & 1)) || ((size == 4) && (where & 3)))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	/* all-ones simulates a master abort on forbidden devices */
	data = bcm1480ht_can_access(bus, devfn) ?
		READCFG32(CFGADDR(bus, devfn, where)) : 0xFFFFFFFF;

	switch (size) {
	case 1:
		*val = (data >> ((where & 3) << 3)) & 0xff;
		break;
	case 2:
		*val = (data >> ((where & 3) << 3)) & 0xffff;
		break;
	default:
		*val = data;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
static int bcm1480ht_pcibios_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 val)
{
	u32 cfgaddr = CFGADDR(bus, devfn, where);
	u32 shift = (where & 3) << 3;
	u32 data;

	/* reject misaligned 16/32-bit accesses */
	if (((size == 2) && (where & 1)) || ((size == 4) && (where & 3)))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (!bcm1480ht_can_access(bus, devfn))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	/* read-modify-write the enclosing 32-bit config word */
	data = READCFG32(cfgaddr);
	switch (size) {
	case 1:
		data = (data & ~(0xff << shift)) | (val << shift);
		break;
	case 2:
		data = (data & ~(0xffff << shift)) | (val << shift);
		break;
	default:
		data = val;
		break;
	}
	WRITECFG32(cfgaddr, data);

	return PCIBIOS_SUCCESSFUL;
}
/* The HT host bridge always roots bus number 0. */
static int bcm1480ht_pcibios_get_busno(void)
{
	return 0;
}
/* config-space accessors handed to the generic PCI layer */
struct pci_ops bcm1480ht_pci_ops = {
	.read	= bcm1480ht_pcibios_read,
	.write	= bcm1480ht_pcibios_write,
};

/* 512MB HT memory match window */
static struct resource bcm1480ht_mem_resource = {
	.name	= "BCM1480 HT MEM",
	.start	= A_BCM1480_PHYS_HT_MEM_MATCH_BYTES,
	.end	= A_BCM1480_PHYS_HT_MEM_MATCH_BYTES + 0x1fffffffUL,
	.flags	= IORESOURCE_MEM,
};

/* 32MB HT I/O match window */
static struct resource bcm1480ht_io_resource = {
	.name	= "BCM1480 HT I/O",
	.start	= A_BCM1480_PHYS_HT_IO_MATCH_BYTES,
	.end	= A_BCM1480_PHYS_HT_IO_MATCH_BYTES + 0x01ffffffUL,
	.flags	= IORESOURCE_IO,
};

/* controller descriptor registered with the MIPS PCI core (index 1) */
struct pci_controller bcm1480ht_controller = {
	.pci_ops	= &bcm1480ht_pci_ops,
	.mem_resource	= &bcm1480ht_mem_resource,
	.io_resource	= &bcm1480ht_io_resource,
	.index		= 1,
	.get_busno	= bcm1480ht_pcibios_get_busno,
	.io_offset	= A_BCM1480_PHYS_HT_IO_MATCH_BYTES,
};
/*
 * Map the HT configuration, EOI and I/O windows and register the
 * controller.  The config-space mapping is mandatory - every config
 * access dereferences it - so its ioremap is now checked.
 */
static int __init bcm1480ht_pcibios_init(void)
{
	ht_cfg_space = ioremap(A_BCM1480_PHYS_HT_CFG_MATCH_BITS, 16*1024*1024);
	if (!ht_cfg_space)
		return -ENOMEM;

	/* CFE doesn't always init all HT paths, so we always scan */
	bcm1480ht_bus_status |= PCI_BUS_ENABLED;

	ht_eoi_space = (unsigned long)
		ioremap(A_BCM1480_PHYS_HT_SPECIAL_MATCH_BYTES,
			4 * 1024 * 1024);
	bcm1480ht_controller.io_map_base = (unsigned long)
		ioremap(A_BCM1480_PHYS_HT_IO_MATCH_BYTES, 65536);
	bcm1480ht_controller.io_map_base -= bcm1480ht_controller.io_offset;

	register_pci_controller(&bcm1480ht_controller);

	return 0;
}
| gpl-2.0 |
Evil-Green/Ptah-GT-I9300 | net/bridge/netfilter/ebt_vlan.c | 10817 | 5574 | /*
* Description: EBTables 802.1Q match extension kernelspace module.
* Authors: Nick Fedchik <nick@fedchik.org.ua>
* Bart De Schuymer <bdschuym@pandora.be>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_vlan.h>
#define MODULE_VERS "0.6"
MODULE_AUTHOR("Nick Fedchik <nick@fedchik.org.ua>");
MODULE_DESCRIPTION("Ebtables: 802.1Q VLAN tag match");
MODULE_LICENSE("GPL");
/* true when the given check is enabled in the rule's bitmask */
#define GET_BITMASK(_BIT_MASK_) info->bitmask & _BIT_MASK_
/* fail the match unless field equals rule value XOR the inversion flag */
#define EXIT_ON_MISMATCH(_MATCH_,_MASK_) {if (!((info->_MATCH_ == _MATCH_)^!!(info->invflags & _MASK_))) return false; }
/*
 * Match callback: extract the 802.1Q TCI (from skb metadata for
 * hw-accelerated tags, otherwise from the frame header) and compare
 * VID, priority and encapsulated protocol against the rule.
 */
static bool
ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct ebt_vlan_info *info = par->matchinfo;
	unsigned short TCI;	/* Whole TCI, given from parsed frame */
	unsigned short id;	/* VLAN ID, given from frame TCI */
	unsigned char prio;	/* user_priority, given from frame TCI */
	/* VLAN encapsulated Type/Length field, given from orig frame */
	__be16 encap;

	if (vlan_tx_tag_present(skb)) {
		/* tag was stripped by hardware - read it from skb metadata */
		TCI = vlan_tx_tag_get(skb);
		encap = skb->protocol;
	} else {
		const struct vlan_hdr *fp;
		struct vlan_hdr _frame;

		fp = skb_header_pointer(skb, 0, sizeof(_frame), &_frame);
		if (fp == NULL)
			return false;
		TCI = ntohs(fp->h_vlan_TCI);
		encap = fp->h_vlan_encapsulated_proto;
	}

	/* Tag Control Information (TCI) consists of the following elements:
	 * - User_priority. The user_priority field is three bits in length,
	 * interpreted as a binary number.
	 * - Canonical Format Indicator (CFI). The Canonical Format Indicator
	 * (CFI) is a single bit flag value. Currently ignored.
	 * - VLAN Identifier (VID). The VID is encoded as
	 * an unsigned binary number. */
	id = TCI & VLAN_VID_MASK;
	prio = (TCI >> 13) & 0x7;

	/* Checking VLAN Identifier (VID) */
	if (GET_BITMASK(EBT_VLAN_ID))
		EXIT_ON_MISMATCH(id, EBT_VLAN_ID);

	/* Checking user_priority */
	if (GET_BITMASK(EBT_VLAN_PRIO))
		EXIT_ON_MISMATCH(prio, EBT_VLAN_PRIO);

	/* Checking Encapsulated Proto (Length/Type) field */
	if (GET_BITMASK(EBT_VLAN_ENCAP))
		EXIT_ON_MISMATCH(encap, EBT_VLAN_ENCAP);

	return true;
}
/*
 * Validate an ebt_vlan rule: it must target 802.1Q frames and every
 * enabled check's value/flag must be within its legal range.
 */
static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
{
	struct ebt_vlan_info *info = par->matchinfo;
	const struct ebt_entry *e = par->entryinfo;

	/* Is it 802.1Q frame checked? */
	if (e->ethproto != htons(ETH_P_8021Q)) {
		pr_debug("passed entry proto %2.4X is not 802.1Q (8100)\n",
				ntohs(e->ethproto));
		return -EINVAL;
	}

	/* Check for bitmask range
	 * True if even one bit is out of mask */
	if (info->bitmask & ~EBT_VLAN_MASK) {
		pr_debug("bitmask %2X is out of mask (%2X)\n",
				info->bitmask, EBT_VLAN_MASK);
		return -EINVAL;
	}

	/* Check for inversion flags range */
	if (info->invflags & ~EBT_VLAN_MASK) {
		pr_debug("inversion flags %2X is out of mask (%2X)\n",
				info->invflags, EBT_VLAN_MASK);
		return -EINVAL;
	}

	/* Reserved VLAN ID (VID) values
	 * -----------------------------
	 * 0 - The null VLAN ID.
	 * 1 - The default Port VID (PVID)
	 * 0x0FFF - Reserved for implementation use.
	 * if_vlan.h: VLAN_N_VID 4096. */
	if (GET_BITMASK(EBT_VLAN_ID)) {
		if (!!info->id) { /* if id!=0 => check vid range */
			/*
			 * Valid VIDs are 0..VLAN_N_VID-1; the previous
			 * "> VLAN_N_VID" test was off by one and let the
			 * invalid value 4096 through.
			 */
			if (info->id >= VLAN_N_VID) {
				pr_debug("id %d is out of range (1-4095)\n",
						info->id);
				return -EINVAL;
			}
			/* Note: This is valid VLAN-tagged frame point.
			 * Any value of user_priority are acceptable,
			 * but should be ignored according to 802.1Q Std.
			 * So we just drop the prio flag. */
			info->bitmask &= ~EBT_VLAN_PRIO;
		}
		/* Else, id=0 (null VLAN ID) => user_priority range (any?) */
	}
	if (GET_BITMASK(EBT_VLAN_PRIO)) {
		if ((unsigned char) info->prio > 7) {
			pr_debug("prio %d is out of range (0-7)\n",
					info->prio);
			return -EINVAL;
		}
	}
	/* Check for encapsulated proto range - it is possible to be
	 * any value for u_short range.
	 * if_ether.h: ETH_ZLEN 60 - Min. octets in frame sans FCS */
	if (GET_BITMASK(EBT_VLAN_ENCAP)) {
		if ((unsigned short) ntohs(info->encap) < ETH_ZLEN) {
			pr_debug("encap frame length %d is less than "
					"minimal\n", ntohs(info->encap));
			return -EINVAL;
		}
	}
	return 0;
}
/* x_tables registration record for the bridge-family "vlan" match */
static struct xt_match ebt_vlan_mt_reg __read_mostly = {
	.name		= "vlan",
	.revision	= 0,
	.family		= NFPROTO_BRIDGE,
	.match		= ebt_vlan_mt,
	.checkentry	= ebt_vlan_mt_check,
	.matchsize	= sizeof(struct ebt_vlan_info),
	.me		= THIS_MODULE,
};
/* Module init: register the match with the x_tables core. */
static int __init ebt_vlan_init(void)
{
	pr_debug("ebtables 802.1Q extension module v" MODULE_VERS "\n");
	return xt_register_match(&ebt_vlan_mt_reg);
}

/* Module exit: unregister the match. */
static void __exit ebt_vlan_fini(void)
{
	xt_unregister_match(&ebt_vlan_mt_reg);
}

module_init(ebt_vlan_init);
module_exit(ebt_vlan_fini);
| gpl-2.0 |
KiWiX-s2/kernel | drivers/net/wan/lmc/lmc_debug.c | 14657 | 1815 | #include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include "lmc_debug.h"
/*
* Prints out len, max to 80 octets using printk, 20 per line
*/
#ifdef DEBUG
#ifdef LMC_PACKET_LOG
/*
 * Hex-dump up to 240 octets of @ucData via printk, 20 bytes per line,
 * each line prefixed with "lmc: <type>: " at KERN_DEBUG level.
 * NOTE(review): str is 80 bytes; a long @type string could overflow
 * the sprintf'd prefix - callers appear to pass short literals, verify.
 */
void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
{
	int iNewLine = 1;
	char str[80], *pstr;

	sprintf(str, KERN_DEBUG "lmc: %s: ", type);
	pstr = str+strlen(str);

	if(iLen > 240){
		printk(KERN_DEBUG "lmc: Printing 240 chars... out of: %d\n", iLen);
		iLen = 240;
	}
	else{
		printk(KERN_DEBUG "lmc: Printing %d chars\n", iLen);
	}

	while(iLen > 0)
	{
		sprintf(pstr, "%02x ", *ucData);
		pstr += 3;
		ucData++;
		if( !(iNewLine % 20))
		{
			sprintf(pstr, "\n");
			/* never hand a data buffer to printk as the format
			 * string - "%x" sequences in the dump would be
			 * interpreted as conversions */
			printk("%s", str);
			sprintf(str, KERN_DEBUG "lmc: %s: ", type);
			pstr = str+strlen(str);
		}
		iNewLine++;
		iLen--;
	}
	sprintf(pstr, "\n");
	printk("%s", str);
}
#endif
#endif
#ifdef DEBUG
/* next free slot in the event ring (wraps via the mask below) */
u32 lmcEventLogIndex;
/* ring of event records, 4 words each: event, arg2, arg3, jiffies */
u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];

/*
 * Append one 4-word record to the event ring; oldest entries are
 * overwritten once the ring wraps.
 * NOTE(review): no locking - concurrent callers may interleave their
 * records; presumably acceptable for this debug-only facility.
 */
void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3)
{
	lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
	lmcEventLogBuf[lmcEventLogIndex++] = arg2;
	lmcEventLogBuf[lmcEventLogIndex++] = arg3;
	lmcEventLogBuf[lmcEventLogIndex++] = jiffies;

	/* power-of-two ring: mask keeps the index in range */
	lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1;
}
#endif /* DEBUG */
/*
 * Optional call tracing: print @msg tagged with the device name.
 * In interrupt context the message is marked with '*' and no delay is
 * taken; in process context we sleep for ~50ms so a slow console can
 * keep up.  Compiled to a no-op unless LMC_TRACE is defined.
 * (The commented-out interrupt-context busy-wait was dead code and has
 * been removed.)
 */
void lmc_trace(struct net_device *dev, char *msg){
#ifdef LMC_TRACE
	unsigned long j = jiffies + 3; /* Wait for 50 ms */

	if(in_interrupt()){
		printk("%s: * %s\n", dev->name, msg);
	}
	else {
		printk("%s: %s\n", dev->name, msg);
		/* yield until the delay expires */
		while(time_before(jiffies, j))
			schedule();
	}
#endif
}
/* --------------------------- end if_lmc_linux.c ------------------------ */
| gpl-2.0 |
dhkim1027/iamroot-linux-arm10c | drivers/power/power_supply_core.c | 66 | 13520 | /*
* Universal power supply monitor class
*
* Copyright © 2007 Anton Vorontsov <cbou@mail.ru>
* Copyright © 2004 Szabolcs Gyurko
* Copyright © 2003 Ian Molton <spyro@f2s.com>
*
* Modified: 2004, Oct Szabolcs Gyurko
*
* You may use this code as per GPL version 2
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/power_supply.h>
#include <linux/thermal.h>
#include "power_supply.h"
/* exported for the APM Power driver, APM emulation */
struct class *power_supply_class;
EXPORT_SYMBOL_GPL(power_supply_class);
static struct device_type power_supply_dev_type;
/*
 * Return true when @supplier feeds @supply, checking whichever of the
 * two link descriptions is present: the consumer's supplied_from list
 * (matched against the supplier's name), or the supplier's supplied_to
 * list (matched against the consumer's name).
 */
static bool __power_supply_is_supplied_by(struct power_supply *supplier,
					 struct power_supply *supply)
{
	int i;

	if (!supply->supplied_from && !supplier->supplied_to)
		return false;

	if (supply->supplied_from) {
		if (!supplier->name)
			return false;
		for (i = 0; i < supply->num_supplies; i++) {
			if (strcmp(supplier->name,
				   supply->supplied_from[i]) == 0)
				return true;
		}
		return false;
	}

	if (!supply->name)
		return false;
	for (i = 0; i < supplier->num_supplicants; i++) {
		if (strcmp(supplier->supplied_to[i], supply->name) == 0)
			return true;
	}
	return false;
}
/*
 * Per-device callback for the change notification walk: invoke the
 * consumer's external_power_changed hook when it is fed by the supply
 * that changed (@data).
 */
static int __power_supply_changed_work(struct device *dev, void *data)
{
	struct power_supply *changed = data;
	struct power_supply *pst = dev_get_drvdata(dev);

	if (__power_supply_is_supplied_by(changed, pst) &&
	    pst->external_power_changed)
		pst->external_power_changed(pst);

	return 0;
}
/*
 * Deferred work: propagate a change on @psy to every supply fed by it,
 * refresh LED triggers and emit a KOBJ_CHANGE uevent so userspace
 * re-reads the properties.
 */
static void power_supply_changed_work(struct work_struct *work)
{
	struct power_supply *psy = container_of(work, struct power_supply,
						changed_work);

	dev_dbg(psy->dev, "%s\n", __func__);

	class_for_each_device(power_supply_class, NULL, psy,
			      __power_supply_changed_work);

	power_supply_update_leds(psy);

	kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
}
/*
 * Notify the class that @psy's state changed.  Only schedules the
 * deferred propagation work, so it is cheap for the caller.
 */
void power_supply_changed(struct power_supply *psy)
{
	dev_dbg(psy->dev, "%s\n", __func__);

	schedule_work(&psy->changed_work);
}
EXPORT_SYMBOL_GPL(power_supply_changed);
#ifdef CONFIG_OF
#include <linux/of.h>
/*
 * Per-device callback: if the registered supply @dev appears among the
 * "power-supplies" phandles of @data's DT node, record its name in the
 * supplied_from array.  Note i is post-incremented by the phandle
 * lookup, so slot i-1 corresponds to the phandle just matched.
 */
static int __power_supply_populate_supplied_from(struct device *dev,
						 void *data)
{
	struct power_supply *psy = (struct power_supply *)data;
	struct power_supply *epsy = dev_get_drvdata(dev);
	struct device_node *np;
	int i = 0;

	do {
		np = of_parse_phandle(psy->of_node, "power-supplies", i++);
		if (!np)
			continue;

		if (np == epsy->of_node) {
			dev_info(psy->dev, "%s: Found supply : %s\n",
				psy->name, epsy->name);
			psy->supplied_from[i-1] = (char *)epsy->name;
			psy->num_supplies++;
			of_node_put(np);
			break;
		}
		of_node_put(np);
	} while (np);

	return 0;
}
/*
 * Walk every registered supply and record the ones that feed @psy
 * (per its DT "power-supplies" phandles) in psy->supplied_from.
 */
static int power_supply_populate_supplied_from(struct power_supply *psy)
{
	int ret;

	ret = class_for_each_device(power_supply_class, NULL, psy,
				    __power_supply_populate_supplied_from);

	dev_dbg(psy->dev, "%s %d\n", __func__, ret);

	return ret;
}
/*
 * class_for_each_device() callback: compare @dev's OF node with the
 * node in @data.  A non-zero return value aborts the walk, which is
 * how the caller detects that the supply is already registered.
 */
static int __power_supply_find_supply_from_node(struct device *dev,
						void *data)
{
	struct power_supply *epsy = dev_get_drvdata(dev);
	struct device_node *node = data;

	return (epsy->of_node == node) ? -EINVAL : 0;
}
/*
 * Check whether a power supply with OF node @supply_node is already
 * registered.  Returns 0 when found, -EPROBE_DEFER otherwise.
 */
static int power_supply_find_supply_from_node(struct device_node *supply_node)
{
	int error;
	struct device *dev;
	struct class_dev_iter iter;

	/*
	 * Use iterator to see if any other device is registered.
	 * This is required since class_for_each_device returns 0
	 * if there are no devices registered.
	 */
	class_dev_iter_init(&iter, power_supply_class, NULL, NULL);
	dev = class_dev_iter_next(&iter);
	/*
	 * Fix: the iterator must always be torn down; the original code
	 * returned without class_dev_iter_exit(), leaking the reference
	 * the iterator holds on the last visited device.
	 */
	class_dev_iter_exit(&iter);
	if (!dev)
		return -EPROBE_DEFER;

	/*
	 * We have to treat the return value as inverted, because if
	 * we return error on not found, then it won't continue looking.
	 * So we trick it by returning error on success to stop looking
	 * once the matching device is found.
	 */
	error = class_for_each_device(power_supply_class, NULL, supply_node,
				      __power_supply_find_supply_from_node);

	return error ? 0 : -EPROBE_DEFER;
}
/*
 * Parse @psy's "power-supplies" DT property, defer probing until every
 * referenced supply is registered, then allocate the supplied_from
 * name array so power_supply_populate_supplied_from() can fill it.
 *
 * Returns 0 on success, -EPROBE_DEFER when a supplier is missing, or
 * -ENOMEM.
 */
static int power_supply_check_supplies(struct power_supply *psy)
{
	struct device_node *np;
	int cnt = 0;

	/* If there is already a list honor it */
	if (psy->supplied_from && psy->num_supplies > 0)
		return 0;

	/* No device node found, nothing to do */
	if (!psy->of_node)
		return 0;

	do {
		int ret;

		np = of_parse_phandle(psy->of_node, "power-supplies", cnt++);
		if (!np)
			continue;

		ret = power_supply_find_supply_from_node(np);
		if (ret) {
			dev_dbg(psy->dev, "Failed to find supply, defer!\n");
			of_node_put(np);
			return -EPROBE_DEFER;
		}
		of_node_put(np);
	} while (np);

	/*
	 * All supplies found.  Allocate the char * name array directly.
	 * cnt is one past the number of phandles, so cnt slots always
	 * cover every supplied_from[i-1] store made while populating.
	 *
	 * Fix: the previous code allocated only sizeof(psy->supplied_from)
	 * (room for a single pointer) and hung the real array off it;
	 * __power_supply_populate_supplied_from() then wrote names through
	 * psy->supplied_from[i-1], clobbering the inner pointer and writing
	 * out of bounds whenever more than one supply was listed.
	 */
	psy->supplied_from = devm_kzalloc(psy->dev, sizeof(char *) * cnt,
					  GFP_KERNEL);
	if (!psy->supplied_from) {
		dev_err(psy->dev, "Couldn't allocate memory for supply list\n");
		return -ENOMEM;
	}

	return power_supply_populate_supplied_from(psy);
}
#else
/* Without CONFIG_OF there are no DT-described supplies to wait for. */
static inline int power_supply_check_supplies(struct power_supply *psy)
{
	return 0;
}
#endif
static int __power_supply_am_i_supplied(struct device *dev, void *data)
{
union power_supply_propval ret = {0,};
struct power_supply *psy = (struct power_supply *)data;
struct power_supply *epsy = dev_get_drvdata(dev);
if (__power_supply_is_supplied_by(epsy, psy))
if (!epsy->get_property(epsy, POWER_SUPPLY_PROP_ONLINE, &ret)) {
if (ret.intval)
return ret.intval;
}
return 0;
}
/* Non-zero when at least one of @psy's suppliers reports itself online. */
int power_supply_am_i_supplied(struct power_supply *psy)
{
	int ret;

	ret = class_for_each_device(power_supply_class, NULL, psy,
				    __power_supply_am_i_supplied);

	dev_dbg(psy->dev, "%s %d\n", __func__, ret);

	return ret;
}
static int __power_supply_is_system_supplied(struct device *dev, void *data)
{
union power_supply_propval ret = {0,};
struct power_supply *psy = dev_get_drvdata(dev);
unsigned int *count = data;
(*count)++;
if (psy->type != POWER_SUPPLY_TYPE_BATTERY) {
if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &ret))
return 0;
if (ret.intval)
return ret.intval;
}
return 0;
}
/*
 * Non-zero when any non-battery supply is online.  With no power class
 * devices registered at all we are most probably on a desktop system,
 * so mains power is assumed.
 */
int power_supply_is_system_supplied(void)
{
	unsigned int count = 0;
	int ret;

	ret = class_for_each_device(power_supply_class, NULL, &count,
				    __power_supply_is_system_supplied);

	return count ? ret : 1;
}
/* Invoke the driver's set_charged() hook; batteries only. */
int power_supply_set_battery_charged(struct power_supply *psy)
{
	if (psy->type != POWER_SUPPLY_TYPE_BATTERY || !psy->set_charged)
		return -EINVAL;

	psy->set_charged(psy);
	return 0;
}
/* class_find_device() match callback: compare supply name with @data. */
static int power_supply_match_device_by_name(struct device *dev, const void *data)
{
	struct power_supply *psy = dev_get_drvdata(dev);

	return !strcmp(psy->name, data);
}
/* Look up a registered supply by name; NULL when no match exists. */
struct power_supply *power_supply_get_by_name(const char *name)
{
	struct device *dev;

	dev = class_find_device(power_supply_class, NULL, name,
				power_supply_match_device_by_name);
	if (!dev)
		return NULL;

	return dev_get_drvdata(dev);
}
/*
 * Create a "powers" sysfs link from @psy to the device it powers;
 * removed again in power_supply_unregister().
 */
int power_supply_powers(struct power_supply *psy, struct device *dev)
{
	return sysfs_create_link(&psy->dev->kobj, &dev->kobj, "powers");
}
/* Release callback for the struct device kzalloc'd in power_supply_register(). */
static void power_supply_dev_release(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	kfree(dev);
}
#ifdef CONFIG_THERMAL
/* Thermal-zone get_temp callback backed by POWER_SUPPLY_PROP_TEMP. */
static int power_supply_read_temp(struct thermal_zone_device *tzd,
		unsigned long *temp)
{
	struct power_supply *psy;
	union power_supply_propval val;
	int ret;

	WARN_ON(tzd == NULL);
	psy = tzd->devdata;

	ret = psy->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
	if (ret)
		return ret;

	/* Property is tenths of a degree C; thermal wants millidegrees. */
	*temp = val.intval * 100;
	return 0;
}
/* Thermal zone ops for supplies exposing a temperature property. */
static struct thermal_zone_device_ops psy_tzd_ops = {
	.get_temp = power_supply_read_temp,
};
/* Register a thermal zone for @psy iff it reports a temperature. */
static int psy_register_thermal(struct power_supply *psy)
{
	int i;

	for (i = 0; i < psy->num_properties; i++) {
		if (psy->properties[i] != POWER_SUPPLY_PROP_TEMP)
			continue;

		psy->tzd = thermal_zone_device_register(psy->name, 0, 0,
				psy, &psy_tzd_ops, NULL, 0, 0);
		return IS_ERR(psy->tzd) ? PTR_ERR(psy->tzd) : 0;
	}
	return 0;
}
/* Tear down the thermal zone, if psy_register_thermal() created one. */
static void psy_unregister_thermal(struct power_supply *psy)
{
	if (IS_ERR_OR_NULL(psy->tzd))
		return;

	thermal_zone_device_unregister(psy->tzd);
}
/* thermal cooling device callbacks */

/* Report the maximum charge-control limit as the max cooling state. */
static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
					unsigned long *state)
{
	struct power_supply *psy;
	union power_supply_propval val;
	int ret;

	psy = tcd->devdata;
	ret = psy->get_property(psy,
		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
	if (!ret)
		*state = val.intval;

	return ret;
}

/*
 * Report the current charge-control limit as the current cooling state.
 * (Renamed from the original misspelled "ps_get_cur_chrage_cntl_limit";
 * the identifier is file-local so no external callers are affected.)
 */
static int ps_get_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
					unsigned long *state)
{
	struct power_supply *psy;
	union power_supply_propval val;
	int ret;

	psy = tcd->devdata;
	ret = psy->get_property(psy,
		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
	if (!ret)
		*state = val.intval;

	return ret;
}

/* Apply a new charge-control limit when the thermal core throttles us. */
static int ps_set_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
					unsigned long state)
{
	struct power_supply *psy;
	union power_supply_propval val;
	int ret;

	psy = tcd->devdata;
	val.intval = state;
	ret = psy->set_property(psy,
		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);

	return ret;
}

static struct thermal_cooling_device_ops psy_tcd_ops = {
	.get_max_state = ps_get_max_charge_cntl_limit,
	.get_cur_state = ps_get_cur_charge_cntl_limit,
	.set_cur_state = ps_set_cur_charge_cntl_limit,
};
/* Register a cooling device iff @psy can throttle its own charging. */
static int psy_register_cooler(struct power_supply *psy)
{
	int i;

	for (i = 0; i < psy->num_properties; i++) {
		if (psy->properties[i] !=
				POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT)
			continue;

		psy->tcd = thermal_cooling_device_register(
				(char *)psy->name,
				psy, &psy_tcd_ops);
		return IS_ERR(psy->tcd) ? PTR_ERR(psy->tcd) : 0;
	}
	return 0;
}
/* Tear down the cooling device, if psy_register_cooler() created one. */
static void psy_unregister_cooler(struct power_supply *psy)
{
	if (IS_ERR_OR_NULL(psy->tcd))
		return;

	thermal_cooling_device_unregister(psy->tcd);
}
#else
/* Without CONFIG_THERMAL these are no-ops. */
static int psy_register_thermal(struct power_supply *psy)
{
	return 0;
}

static void psy_unregister_thermal(struct power_supply *psy)
{
}

static int psy_register_cooler(struct power_supply *psy)
{
	return 0;
}

static void psy_unregister_cooler(struct power_supply *psy)
{
}
#endif
/*
 * power_supply_register - register a new power supply with the class
 * @parent: device owning the supply (may be NULL)
 * @psy:    supply descriptor filled in by the driver
 *
 * Allocates and adds the class device, resolves DT-described supplier
 * dependencies (may return -EPROBE_DEFER), registers optional thermal
 * zone/cooling device and LED triggers, and announces the supply.
 * Returns 0 on success or a negative errno; on failure the device is
 * released via put_device() and its resources are freed.
 */
int power_supply_register(struct device *parent, struct power_supply *psy)
{
	struct device *dev;
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	device_initialize(dev);

	dev->class = power_supply_class;
	dev->type = &power_supply_dev_type;
	dev->parent = parent;
	dev->release = power_supply_dev_release;
	dev_set_drvdata(dev, psy);
	psy->dev = dev;

	INIT_WORK(&psy->changed_work, power_supply_changed_work);

	rc = power_supply_check_supplies(psy);
	if (rc) {
		dev_info(dev, "Not all required supplies found, defer probe\n");
		goto check_supplies_failed;
	}

	rc = kobject_set_name(&dev->kobj, "%s", psy->name);
	if (rc)
		goto kobject_set_name_failed;

	rc = device_add(dev);
	if (rc)
		goto device_add_failed;

	rc = psy_register_thermal(psy);
	if (rc)
		goto register_thermal_failed;

	rc = psy_register_cooler(psy);
	if (rc)
		goto register_cooler_failed;

	rc = power_supply_create_triggers(psy);
	if (rc)
		goto create_triggers_failed;

	power_supply_changed(psy);

	goto success;

/* Unwind in strict reverse order of the setup above. */
create_triggers_failed:
	psy_unregister_cooler(psy);
register_cooler_failed:
	psy_unregister_thermal(psy);
register_thermal_failed:
	device_del(dev);
kobject_set_name_failed:
device_add_failed:
check_supplies_failed:
	/*
	 * Drops the last reference; power_supply_dev_release() frees dev.
	 * NOTE(review): psy->dev is left pointing at the freed device on
	 * this path — callers must not touch it after a failed register.
	 */
	put_device(dev);
success:
	return rc;
}
EXPORT_SYMBOL_GPL(power_supply_register);
/*
 * power_supply_unregister - remove a supply registered with
 * power_supply_register(); tears everything down in reverse order.
 */
void power_supply_unregister(struct power_supply *psy)
{
	/* Make sure no change notification is still in flight. */
	cancel_work_sync(&psy->changed_work);
	sysfs_remove_link(&psy->dev->kobj, "powers");
	power_supply_remove_triggers(psy);
	psy_unregister_cooler(psy);
	psy_unregister_thermal(psy);
	device_unregister(psy->dev);
}
EXPORT_SYMBOL_GPL(power_supply_unregister);
/* Create the "power_supply" device class and hook up uevent/attrs. */
static int __init power_supply_class_init(void)
{
	power_supply_class = class_create(THIS_MODULE, "power_supply");

	if (IS_ERR(power_supply_class))
		return PTR_ERR(power_supply_class);

	power_supply_class->dev_uevent = power_supply_uevent;
	power_supply_init_attrs(&power_supply_dev_type);

	return 0;
}
/* Destroy the class created in power_supply_class_init(). */
static void __exit power_supply_class_exit(void)
{
	class_destroy(power_supply_class);
}
subsys_initcall(power_supply_class_init);
module_exit(power_supply_class_exit);
MODULE_DESCRIPTION("Universal power supply monitor class");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>, "
"Szabolcs Gyurko, "
"Anton Vorontsov <cbou@mail.ru>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
victoredwardocallaghan/xen | xen/common/xz/crc32.c | 66 | 1082 | /*
* CRC32 using the polynomial from IEEE-802.3
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
/*
* This is not the fastest implementation, but it is pretty compact.
* The fastest versions of xz_crc32() on modern CPUs without hardware
* accelerated CRC instruction are 3-5 times as fast as this version,
* but they are bigger and use more memory for the lookup table.
*/
#include "private.h"
XZ_EXTERN uint32_t INITDATA xz_crc32_table[256];
XZ_EXTERN void INIT xz_crc32_init(void)
{
const uint32_t poly = 0xEDB88320;
uint32_t i;
uint32_t j;
uint32_t r;
for (i = 0; i < 256; ++i) {
r = i;
for (j = 0; j < 8; ++j)
r = (r >> 1) ^ (poly & ~((r & 1) - 1));
xz_crc32_table[i] = r;
}
return;
}
/*
 * Standard byte-at-a-time table-driven CRC32 (reflected).  @crc is the
 * running value from a previous call, or 0 to start a new checksum.
 */
XZ_EXTERN uint32_t INIT xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
{
	crc = ~crc;

	for (; size != 0; --size)
		crc = xz_crc32_table[(crc ^ *buf++) & 0xFF] ^ (crc >> 8);

	return ~crc;
}
| gpl-2.0 |
timemath/hmfs | drivers/usb/gadget/atmel_usba_udc.c | 66 | 51922 | /*
* Driver for the Atmel USBA high speed USB device controller
*
* Copyright (C) 2005-2007 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/atmel_usba_udc.h>
#include <linux/delay.h>
#include <linux/platform_data/atmel.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <asm/gpio.h>
#include "atmel_usba_udc.h"
#ifdef CONFIG_USB_GADGET_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
/*
 * debugfs "queue" open: snapshot the endpoint's request queue under the
 * UDC lock by deep-copying every request onto a private list that the
 * read/release handlers consume.
 */
static int queue_dbg_open(struct inode *inode, struct file *file)
{
	struct usba_ep *ep = inode->i_private;
	struct usba_request *req, *req_copy;
	struct list_head *queue_data;

	queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL);
	if (!queue_data)
		return -ENOMEM;
	INIT_LIST_HEAD(queue_data);

	spin_lock_irq(&ep->udc->lock);
	list_for_each_entry(req, &ep->queue, queue) {
		/* GFP_ATOMIC: we hold a spinlock with IRQs off here. */
		req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC);
		if (!req_copy)
			goto fail;
		list_add_tail(&req_copy->queue, queue_data);
	}
	spin_unlock_irq(&ep->udc->lock);

	file->private_data = queue_data;
	return 0;

fail:
	spin_unlock_irq(&ep->udc->lock);
	/* Undo the partial snapshot. */
	list_for_each_entry_safe(req, req_copy, queue_data, queue) {
		list_del(&req->queue);
		kfree(req);
	}
	kfree(queue_data);
	return -ENOMEM;
}
/*
* bbbbbbbb llllllll IZS sssss nnnn FDL\n\0
*
* b: buffer address
* l: buffer length
* I/i: interrupt/no interrupt
* Z/z: zero/no zero
* S/s: short ok/short not ok
* s: status
* n: nr_packets
* F/f: submitted/not submitted to FIFO
* D/d: using/not using DMA
* L/l: last transaction/not last transaction
*/
/*
 * debugfs "queue" read: format and hand out one snapshot entry per
 * line (see the field legend above), freeing each entry as it is
 * consumed.  Entries that no longer fit in @nbytes stay on the list
 * for the next read.
 */
static ssize_t queue_dbg_read(struct file *file, char __user *buf,
		size_t nbytes, loff_t *ppos)
{
	struct list_head *queue = file->private_data;
	struct usba_request *req, *tmp_req;
	size_t len, remaining, actual = 0;
	char tmpbuf[38];	/* sized for the legend's fixed-width line */

	if (!access_ok(VERIFY_WRITE, buf, nbytes))
		return -EFAULT;
	mutex_lock(&file_inode(file)->i_mutex);
	list_for_each_entry_safe(req, tmp_req, queue, queue) {
		len = snprintf(tmpbuf, sizeof(tmpbuf),
				"%8p %08x %c%c%c %5d %c%c%c\n",
				req->req.buf, req->req.length,
				req->req.no_interrupt ? 'i' : 'I',
				req->req.zero ? 'Z' : 'z',
				req->req.short_not_ok ? 's' : 'S',
				req->req.status,
				req->submitted ? 'F' : 'f',
				req->using_dma ? 'D' : 'd',
				req->last_transaction ? 'L' : 'l');
		/* snprintf returns the untruncated length; clamp it. */
		len = min(len, sizeof(tmpbuf));
		if (len > nbytes)
			break;

		list_del(&req->queue);
		kfree(req);

		remaining = __copy_to_user(buf, tmpbuf, len);
		actual += len - remaining;
		if (remaining)
			break;

		nbytes -= len;
		buf += len;
	}
	mutex_unlock(&file_inode(file)->i_mutex);

	return actual;
}
/* debugfs "queue" release: free whatever is left of the snapshot. */
static int queue_dbg_release(struct inode *inode, struct file *file)
{
	struct list_head *snapshot = file->private_data;
	struct usba_request *req, *next;

	list_for_each_entry_safe(req, next, snapshot, queue) {
		list_del(&req->queue);
		kfree(req);
	}
	kfree(snapshot);

	return 0;
}
/*
 * debugfs "regs" open: capture a consistent snapshot of the controller
 * register window (size = inode->i_size, set at file creation) under
 * the UDC lock; reads are then served from the copy.
 */
static int regs_dbg_open(struct inode *inode, struct file *file)
{
	struct usba_udc *udc;
	unsigned int i;
	u32 *data;
	int ret = -ENOMEM;

	mutex_lock(&inode->i_mutex);
	udc = inode->i_private;
	data = kmalloc(inode->i_size, GFP_KERNEL);
	if (!data)
		goto out;

	spin_lock_irq(&udc->lock);
	for (i = 0; i < inode->i_size / 4; i++)
		data[i] = __raw_readl(udc->regs + i * 4);
	spin_unlock_irq(&udc->lock);

	file->private_data = data;
	ret = 0;

out:
	mutex_unlock(&inode->i_mutex);

	return ret;
}
/* debugfs "regs" read: serve bytes from the snapshot taken at open(). */
static ssize_t regs_dbg_read(struct file *file, char __user *buf,
		size_t nbytes, loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	int ret;

	mutex_lock(&inode->i_mutex);
	ret = simple_read_from_buffer(buf, nbytes, ppos,
			file->private_data,
			file_inode(file)->i_size);
	mutex_unlock(&inode->i_mutex);

	return ret;
}
/* debugfs "regs" release: free the register snapshot from open(). */
static int regs_dbg_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
/*
 * File operations for the per-endpoint "queue" and per-UDC "regs"
 * debugfs files.  Marked static: they are referenced only within this
 * file, and exporting them polluted the kernel's global namespace.
 */
static const struct file_operations queue_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= queue_dbg_open,
	.llseek		= no_llseek,
	.read		= queue_dbg_read,
	.release	= queue_dbg_release,
};

static const struct file_operations regs_dbg_fops = {
	.owner		= THIS_MODULE,
	.open		= regs_dbg_open,
	.llseek		= generic_file_llseek,
	.read		= regs_dbg_read,
	.release	= regs_dbg_release,
};
/*
 * Create the per-endpoint debugfs directory with a "queue" file, plus
 * "dma_status" for DMA-capable endpoints and "state" for ep0.  Failure
 * is non-fatal: everything created so far is unwound and an error is
 * logged.
 */
static void usba_ep_init_debugfs(struct usba_udc *udc,
		struct usba_ep *ep)
{
	struct dentry *ep_root;

	ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root);
	if (!ep_root)
		goto err_root;
	ep->debugfs_dir = ep_root;

	ep->debugfs_queue = debugfs_create_file("queue", 0400, ep_root,
						ep, &queue_dbg_fops);
	if (!ep->debugfs_queue)
		goto err_queue;

	if (ep->can_dma) {
		ep->debugfs_dma_status
			= debugfs_create_u32("dma_status", 0400, ep_root,
					&ep->last_dma_status);
		if (!ep->debugfs_dma_status)
			goto err_dma_status;
	}
	if (ep_is_control(ep)) {
		ep->debugfs_state
			= debugfs_create_u32("state", 0400, ep_root,
					&ep->state);
		if (!ep->debugfs_state)
			goto err_state;
	}

	return;

/* Unwind in reverse creation order. */
err_state:
	if (ep->can_dma)
		debugfs_remove(ep->debugfs_dma_status);
err_dma_status:
	debugfs_remove(ep->debugfs_queue);
err_queue:
	debugfs_remove(ep_root);
err_root:
	dev_err(&ep->udc->pdev->dev,
		"failed to create debugfs directory for %s\n", ep->ep.name);
}
/*
 * Remove the per-endpoint debugfs entries created by
 * usba_ep_init_debugfs(); debugfs_remove(NULL) is a no-op, so entries
 * that were never created are harmless.
 */
static void usba_ep_cleanup_debugfs(struct usba_ep *ep)
{
	debugfs_remove(ep->debugfs_queue);
	debugfs_remove(ep->debugfs_dma_status);
	debugfs_remove(ep->debugfs_state);
	debugfs_remove(ep->debugfs_dir);

	ep->debugfs_dma_status = NULL;
	ep->debugfs_dir = NULL;
}
static void usba_init_debugfs(struct usba_udc *udc)
{
struct dentry *root, *regs;
struct resource *regs_resource;
root = debugfs_create_dir(udc->gadget.name, NULL);
if (IS_ERR(root) || !root)
goto err_root;
udc->debugfs_root = root;
regs = debugfs_create_file("regs", 0400, root, udc, ®s_dbg_fops);
if (!regs)
goto err_regs;
regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
CTRL_IOMEM_ID);
regs->d_inode->i_size = resource_size(regs_resource);
udc->debugfs_regs = regs;
usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0));
return;
err_regs:
debugfs_remove(root);
err_root:
udc->debugfs_root = NULL;
dev_err(&udc->pdev->dev, "debugfs is not available\n");
}
/* Remove ep0's entries, then the "regs" file and the debugfs root. */
static void usba_cleanup_debugfs(struct usba_udc *udc)
{
	usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0));
	debugfs_remove(udc->debugfs_regs);
	debugfs_remove(udc->debugfs_root);
	udc->debugfs_regs = NULL;
	udc->debugfs_root = NULL;
}
#else
/* Without CONFIG_USB_GADGET_DEBUG_FS the debugfs hooks are no-ops. */
static inline void usba_ep_init_debugfs(struct usba_udc *udc,
					 struct usba_ep *ep)
{

}

static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep)
{

}

static inline void usba_init_debugfs(struct usba_udc *udc)
{

}

static inline void usba_cleanup_debugfs(struct usba_udc *udc)
{

}
#endif
/*
 * Sample the Vbus GPIO (honouring polarity inversion).  Boards without
 * Vbus detection are treated as always powered.
 */
static int vbus_is_present(struct usba_udc *udc)
{
	if (!gpio_is_valid(udc->vbus_pin))
		return 1;

	return gpio_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted;
}
#if defined(CONFIG_ARCH_AT91SAM9RL)
#include <mach/at91_pmc.h>

/* AT91SAM9RL only: gate the UTMI bias via the PMC's BIASEN bit. */
static void toggle_bias(int is_on)
{
	unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);

	if (is_on)
		at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
	else
		at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
}

#else

/* Other SoCs need no bias toggling. */
static void toggle_bias(int is_on)
{
}

#endif /* CONFIG_ARCH_AT91SAM9RL */
/*
 * Copy the next chunk of @req (at most one max-packet) into the
 * endpoint FIFO and mark the packet ready.  last_transaction is
 * cleared for full-size packets — and for an exact max-packet final
 * chunk when req.zero asks for a trailing zero-length packet.
 */
static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
{
	unsigned int transaction_len;

	transaction_len = req->req.length - req->req.actual;
	req->last_transaction = 1;
	if (transaction_len > ep->ep.maxpacket) {
		transaction_len = ep->ep.maxpacket;
		req->last_transaction = 0;
	} else if (transaction_len == ep->ep.maxpacket && req->req.zero)
		req->last_transaction = 0;

	DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
		ep->ep.name, req, transaction_len,
		req->last_transaction ? ", done" : "");

	memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len);
	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
	req->req.actual += transaction_len;
}
/*
 * Kick off transfer of @req on @ep: either program the DMA channel or
 * push the first chunk through the FIFO and arm the matching
 * interrupt.  Caller holds the UDC lock.
 */
static void submit_request(struct usba_ep *ep, struct usba_request *req)
{
	DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n",
		ep->ep.name, req, req->req.length);

	req->req.actual = 0;
	req->submitted = 1;

	if (req->using_dma) {
		if (req->req.length == 0) {
			/* Zero-length transfer: just signal packet ready. */
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
			return;
		}

		if (req->req.zero)
			usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET);
		else
			usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET);

		usba_dma_writel(ep, ADDRESS, req->req.dma);
		usba_dma_writel(ep, CONTROL, req->ctrl);
	} else {
		next_fifo_transaction(ep, req);
		/* Interrupt on completion for the last chunk, otherwise on
		 * FIFO-ready so we can feed the next chunk. */
		if (req->last_transaction) {
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
		} else {
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
		}
	}
}
/*
 * Start the head of @ep's queue if it has not been submitted yet;
 * with an empty queue, mask both FIFO-ready interrupts instead.
 * Caller holds the UDC lock.
 */
static void submit_next_request(struct usba_ep *ep)
{
	struct usba_request *head;

	if (list_empty(&ep->queue)) {
		usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY);
		return;
	}

	head = list_first_entry(&ep->queue, struct usba_request, queue);
	if (!head->submitted)
		submit_request(ep, head);
}
/* Queue the zero-length IN status packet of a control transfer. */
static void send_status(struct usba_udc *udc, struct usba_ep *ep)
{
	ep->state = STATUS_STAGE_IN;
	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
	usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
}
/*
 * Drain busy FIFO banks into queued OUT requests (PIO path).  Each
 * completed request is finished with the UDC lock dropped around its
 * completion callback; for control endpoints the status stage is sent
 * once a request completes.  Caller holds the UDC lock.
 */
static void receive_data(struct usba_ep *ep)
{
	struct usba_udc *udc = ep->udc;
	struct usba_request *req;
	unsigned long status;
	unsigned int bytecount, nr_busy;
	int is_complete = 0;

	status = usba_ep_readl(ep, STA);
	nr_busy = USBA_BFEXT(BUSY_BANKS, status);

	DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);

	while (nr_busy > 0) {
		if (list_empty(&ep->queue)) {
			/* No one wants the data: stop RX interrupts. */
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			break;
		}
		req = list_entry(ep->queue.next,
				 struct usba_request, queue);

		bytecount = USBA_BFEXT(BYTE_COUNT, status);

		/* NOTE(review): bit 31 of STA presumably flags a short
		 * packet / end of transfer — confirm against datasheet. */
		if (status & (1 << 31))
			is_complete = 1;
		if (req->req.actual + bytecount >= req->req.length) {
			is_complete = 1;
			bytecount = req->req.length - req->req.actual;
		}

		memcpy_fromio(req->req.buf + req->req.actual,
				ep->fifo, bytecount);
		req->req.actual += bytecount;

		/* Release the FIFO bank back to the hardware. */
		usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);

		if (is_complete) {
			DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name);
			req->req.status = 0;
			list_del_init(&req->queue);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			/* Completion callbacks run without the lock. */
			spin_unlock(&udc->lock);
			req->req.complete(&ep->ep, &req->req);
			spin_lock(&udc->lock);
		}

		status = usba_ep_readl(ep, STA);
		nr_busy = USBA_BFEXT(BUSY_BANKS, status);

		if (is_complete && ep_is_control(ep)) {
			send_status(udc, ep);
			break;
		}
	}
}
/*
 * Finish @req with @status: unmap its DMA buffer if needed and invoke
 * the gadget completion callback with the UDC lock dropped.  The
 * request must already be off the endpoint queue.
 */
static void
request_complete(struct usba_ep *ep, struct usba_request *req, int status)
{
	struct usba_udc *udc = ep->udc;

	WARN_ON(!list_empty(&req->queue));

	/* Don't overwrite a status set earlier (e.g. -ECONNRESET). */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;

	if (req->using_dma)
		usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in);

	DBG(DBG_GADGET | DBG_REQ,
		"%s: req %p complete: status %d, actual %u\n",
		ep->ep.name, req, req->req.status, req->req.actual);

	spin_unlock(&udc->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&udc->lock);
}
/* Complete every request on @list with @status, emptying the list. */
static void
request_complete_list(struct usba_ep *ep, struct list_head *list, int status)
{
	struct usba_request *req, *next;

	list_for_each_entry_safe(req, next, list, queue) {
		list_del_init(&req->queue);
		request_complete(ep, req, status);
	}
}
/*
 * Gadget ep_enable: validate @desc against the endpoint's hardware
 * capabilities (index, FIFO size, isoc support), build the EPT_CFG
 * value and enable the endpoint plus its (DMA) interrupts.  Returns 0
 * or -EINVAL on a descriptor/hardware mismatch.
 */
static int
usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags, ept_cfg, maxpacket;
	unsigned int nr_trans;

	DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);

	/* Low 11 bits of wMaxPacketSize are the packet size proper. */
	maxpacket = usb_endpoint_maxp(desc) & 0x7ff;

	if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
			|| ep->index == 0
			|| desc->bDescriptorType != USB_DT_ENDPOINT
			|| maxpacket == 0
			|| maxpacket > ep->fifo_size) {
		DBG(DBG_ERR, "ep_enable: Invalid argument");
		return -EINVAL;
	}

	ep->is_isoc = 0;
	ep->is_in = 0;

	if (maxpacket <= 8)
		ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
	else
		/* LSB is bit 1, not 0 */
		ept_cfg = USBA_BF(EPT_SIZE, fls(maxpacket - 1) - 3);

	DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
			ep->ep.name, ept_cfg, maxpacket);

	if (usb_endpoint_dir_in(desc)) {
		ep->is_in = 1;
		ept_cfg |= USBA_EPT_DIR_IN;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!ep->can_isoc) {
			DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
					ep->ep.name);
			return -EINVAL;
		}

		/*
		 * Bits 11:12 specify number of _additional_
		 * transactions per microframe.
		 */
		nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1;
		if (nr_trans > 3)
			return -EINVAL;

		ep->is_isoc = 1;
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);

		/*
		 * Do triple-buffering on high-bandwidth iso endpoints.
		 */
		if (nr_trans > 1 && ep->nr_banks == 3)
			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_TRIPLE);
		else
			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		break;
	case USB_ENDPOINT_XFER_INT:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		break;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);

	ep->ep.desc = desc;
	ep->ep.maxpacket = maxpacket;

	usba_ep_writel(ep, CFG, ept_cfg);
	usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);

	if (ep->can_dma) {
		u32 ctrl;

		/* Enable both the endpoint and its DMA interrupt. */
		usba_writel(udc, INT_ENB,
				(usba_readl(udc, INT_ENB)
					| USBA_BF(EPT_INT, 1 << ep->index)
					| USBA_BF(DMA_INT, 1 << ep->index)));
		ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
		usba_ep_writel(ep, CTL_ENB, ctrl);
	} else {
		usba_writel(udc, INT_ENB,
				(usba_readl(udc, INT_ENB)
					| USBA_BF(EPT_INT, 1 << ep->index)));
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
			(unsigned long)usba_ep_readl(ep, CFG));
	DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
			(unsigned long)usba_readl(udc, INT_ENB));

	return 0;
}
/*
 * Gadget ep_disable: detach the descriptor, stop any DMA, disable the
 * endpoint and its interrupt, then complete all pending requests with
 * -ESHUTDOWN (after dropping the lock).
 */
static int usba_ep_disable(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	LIST_HEAD(req_list);
	unsigned long flags;

	DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);

	spin_lock_irqsave(&udc->lock, flags);

	if (!ep->ep.desc) {
		spin_unlock_irqrestore(&udc->lock, flags);
		/* REVISIT because this driver disables endpoints in
		 * reset_all_endpoints() before calling disconnect(),
		 * most gadget drivers would trigger this non-error ...
		 */
		if (udc->gadget.speed != USB_SPEED_UNKNOWN)
			DBG(DBG_ERR, "ep_disable: %s not enabled\n",
					ep->ep.name);
		return -EINVAL;
	}
	ep->ep.desc = NULL;

	/* Move the queue aside so it can be completed lock-free below. */
	list_splice_init(&ep->queue, &req_list);
	if (ep->can_dma) {
		usba_dma_writel(ep, CONTROL, 0);
		usba_dma_writel(ep, ADDRESS, 0);
		usba_dma_readl(ep, STATUS);
	}
	usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
	usba_writel(udc, INT_ENB,
			usba_readl(udc, INT_ENB)
			& ~USBA_BF(EPT_INT, 1 << ep->index));

	request_complete_list(ep, &req_list, -ESHUTDOWN);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* Gadget alloc_request: allocate and initialise a driver request. */
static struct usb_request *
usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct usba_request *req;

	DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (req == NULL)
		return NULL;

	INIT_LIST_HEAD(&req->queue);
	return &req->req;
}
/* Gadget free_request: release a request from usba_ep_alloc_request(). */
static void
usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);

	kfree(to_usba_req(_req));
}
/*
 * Queue @req on a DMA-capable endpoint: map the buffer, build the DMA
 * control word and start the transfer immediately if the queue is
 * empty.  Returns -EINVAL for transfers over 64 KiB, -ESHUTDOWN if the
 * endpoint was disabled by a reset in the meantime.
 */
static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
		struct usba_request *req, gfp_t gfp_flags)
{
	unsigned long flags;
	int ret;

	DBG(DBG_DMA, "%s: req l/%u d/%08x %c%c%c\n",
		ep->ep.name, req->req.length, req->req.dma,
		req->req.zero ? 'Z' : 'z',
		req->req.short_not_ok ? 'S' : 's',
		req->req.no_interrupt ? 'I' : 'i');

	if (req->req.length > 0x10000) {
		/* Lengths from 0 to 65536 (inclusive) are supported */
		DBG(DBG_ERR, "invalid request length %u\n", req->req.length);
		return -EINVAL;
	}

	ret = usb_gadget_map_request(&udc->gadget, &req->req, ep->is_in);
	if (ret)
		return ret;

	req->using_dma = 1;
	req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
			| USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
			| USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;

	if (ep->is_in)
		req->ctrl |= USBA_DMA_END_BUF_EN;

	/*
	 * Add this request to the queue and submit for DMA if
	 * possible. Check if we're still alive first -- we may have
	 * received a reset since last time we checked.
	 */
	ret = -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	if (ep->ep.desc) {
		if (list_empty(&ep->queue))
			submit_request(ep, req);

		list_add_tail(&req->queue, &ep->queue);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
/*
 * Gadget ep_queue: hand @_req to the hardware.  DMA-capable endpoints
 * take the queue_dma() path; otherwise the request is queued and the
 * direction-appropriate FIFO interrupt is armed so the IRQ handler
 * feeds the FIFO.  Returns -ESHUTDOWN when no driver is bound or the
 * endpoint is disabled.
 */
static int
usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct usba_request *req = to_usba_req(_req);
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags;
	int ret;

	DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
			ep->ep.name, req, _req->length);

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN ||
	    !ep->ep.desc)
		return -ESHUTDOWN;

	req->submitted = 0;
	req->using_dma = 0;
	req->last_transaction = 0;

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	if (ep->can_dma)
		return queue_dma(udc, ep, req, gfp_flags);

	/* May have received a reset since last time we checked */
	ret = -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	if (ep->ep.desc) {
		list_add_tail(&req->queue, &ep->queue);

		/* IN transfers (and control IN stages) wait for TX-ready,
		 * everything else for RX bank-ready. */
		if ((!ep_is_control(ep) && ep->is_in) ||
			(ep_is_control(ep)
				&& (ep->state == DATA_STAGE_IN
					|| ep->state == STATUS_STAGE_IN)))
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
		else
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
/*
 * Derive bytes actually transferred from the DMA status word: the
 * DMA_BUF_LEN field counts down from the programmed length.
 */
static void
usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status)
{
	req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status);
}
/*
 * Stop the endpoint's DMA channel and wait (up to ~40us) for the FIFO
 * to drain.  The last status word read is stored through @pstatus when
 * non-NULL.  Returns 0 or -ETIMEDOUT.
 */
static int stop_dma(struct usba_ep *ep, u32 *pstatus)
{
	unsigned int attempts = 40;
	u32 status;

	/*
	 * Writing 0 clears CH_EN and LINK; the remaining control bits
	 * are left untouched.
	 */
	usba_dma_writel(ep, CONTROL, 0);

	do {
		status = usba_dma_readl(ep, STATUS);
		if (!(status & USBA_DMA_CH_EN))
			break;
		udelay(1);
	} while (--attempts);

	if (pstatus)
		*pstatus = status;

	if (!attempts) {
		dev_err(&ep->udc->pdev->dev,
			"%s: timed out waiting for DMA FIFO to empty\n",
			ep->ep.name);
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * Gadget ep_dequeue: abort @_req.  When the request is at the head of
 * a DMA endpoint's queue its transfer may be in flight, so DMA is
 * stopped and the FIFO reset first.  The request is completed with
 * -ECONNRESET and the next one (if any) is started.
 */
static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	struct usba_request *req = to_usba_req(_req);
	unsigned long flags;
	u32 status;

	DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
			ep->ep.name, req);

	spin_lock_irqsave(&udc->lock, flags);

	if (req->using_dma) {
		/*
		 * If this request is currently being transferred,
		 * stop the DMA controller and reset the FIFO.
		 */
		if (ep->queue.next == &req->queue) {
			status = usba_dma_readl(ep, STATUS);
			if (status & USBA_DMA_CH_EN)
				stop_dma(ep, &status);

#ifdef CONFIG_USB_GADGET_DEBUG_FS
			ep->last_dma_status = status;
#endif

			usba_writel(udc, EPT_RST, 1 << ep->index);

			usba_update_req(ep, req, status);
		}
	}

	/*
	 * Errors should stop the queue from advancing until the
	 * completion function returns.
	 */
	list_del_init(&req->queue);

	request_complete(ep, req, -ECONNRESET);

	/* Process the next request if any */
	submit_next_request(ep);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/*
 * Gadget set_halt: stall (@value != 0) or un-stall the endpoint.
 * Halting is refused with -EAGAIN while requests are queued or an IN
 * endpoint still has busy FIFO banks; clearing the stall also resets
 * the data toggle.
 */
static int usba_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags;
	int ret = 0;

	DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name,
			value ? "set" : "clear");

	if (!ep->ep.desc) {
		DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
				ep->ep.name);
		return -ENODEV;
	}
	if (ep->is_isoc) {
		/* Isochronous endpoints cannot stall by USB spec. */
		DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
				ep->ep.name);
		return -ENOTTY;
	}

	spin_lock_irqsave(&udc->lock, flags);

	/*
	 * We can't halt IN endpoints while there are still data to be
	 * transferred
	 */
	if (!list_empty(&ep->queue)
			|| ((value && ep->is_in && (usba_ep_readl(ep, STA)
					& USBA_BF(BUSY_BANKS, -1L))))) {
		ret = -EAGAIN;
	} else {
		if (value)
			usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
		else
			usba_ep_writel(ep, CLR_STA,
					USBA_FORCE_STALL | USBA_TOGGLE_CLR);
		/* Read back to flush the posted write. */
		usba_ep_readl(ep, STA);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
/* Gadget fifo_status: bytes currently held in the endpoint FIFO. */
static int usba_ep_fifo_status(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);

	return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
}
/* Gadget fifo_flush: reset this endpoint's FIFO via the EPT_RST register. */
static void usba_ep_fifo_flush(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;

	usba_writel(udc, EPT_RST, 1 << ep->index);
}
/* Per-endpoint operations handed to the gadget framework. */
static const struct usb_ep_ops usba_ep_ops = {
	.enable		= usba_ep_enable,
	.disable	= usba_ep_disable,
	.alloc_request	= usba_ep_alloc_request,
	.free_request	= usba_ep_free_request,
	.queue		= usba_ep_queue,
	.dequeue	= usba_ep_dequeue,
	.set_halt	= usba_ep_set_halt,
	.fifo_status	= usba_ep_fifo_status,
	.fifo_flush	= usba_ep_fifo_flush,
};
static int usba_udc_get_frame(struct usb_gadget *gadget)
{
struct usba_udc *udc = to_usba_udc(gadget);
return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM));
}
/*
 * Signal remote wakeup to the host.  Only permitted when the host has
 * previously enabled the REMOTE_WAKEUP device feature; returns -EINVAL
 * otherwise.
 */
static int usba_udc_wakeup(struct usb_gadget *gadget)
{
	struct usba_udc *udc = to_usba_udc(gadget);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
		usba_writel(udc, CTRL,
				usba_readl(udc, CTRL) | USBA_REMOTE_WAKE_UP);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
/* Update the SELF_POWERED bit reported through GET_STATUS(device). */
static int
usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct usba_udc *udc = to_usba_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_selfpowered)
		udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
	else
		udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
static int atmel_usba_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
static int atmel_usba_stop(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
/* Controller-wide operations handed to the gadget framework. */
static const struct usb_gadget_ops usba_udc_ops = {
	.get_frame		= usba_udc_get_frame,
	.wakeup			= usba_udc_wakeup,
	.set_selfpowered	= usba_udc_set_selfpowered,
	.udc_start		= atmel_usba_start,
	.udc_stop		= atmel_usba_stop,
};
/*
 * Software-defined descriptor for endpoint 0.  The hardware has no real
 * descriptor for ep0, but the gadget core expects ep->ep.desc to be valid;
 * it is installed on every bus reset (see usba_udc_irq()).
 */
static struct usb_endpoint_descriptor usba_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64),
	/* FIXME: I have no idea what to put here */
	.bInterval = 1,
};
/*
 * No-op release callback for the template gadget device.
 * NOTE(review): presumably present because the driver core expects every
 * device to have a release function; the containing structure is freed
 * elsewhere (devm allocation in probe) -- confirm before removing.
 */
static void nop_release(struct device *dev)
{
}
/*
 * Template copied into udc->gadget at probe time (see usba_udc_probe()).
 * Advertises high-speed capability and wires in the controller ops above.
 */
struct usb_gadget usba_gadget_template = {
	.ops		= &usba_udc_ops,
	.max_speed	= USB_SPEED_HIGH,
	.name		= "atmel_usba_udc",
	.dev	= {
		.init_name	= "gadget",
		.release	= nop_release,
	},
};
/*
* Called with interrupts disabled and udc->lock held.
*/
/*
 * Called with interrupts disabled and udc->lock held.
 */
static void reset_all_endpoints(struct usba_udc *udc)
{
	struct usba_ep *ep;
	struct usba_request *req, *tmp_req;

	/* Reset every endpoint FIFO/state machine in one shot. */
	usba_writel(udc, EPT_RST, ~0UL);

	/* Fail any requests still queued on ep0. */
	ep = to_usba_ep(udc->gadget.ep0);
	list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
		list_del_init(&req->queue);
		request_complete(ep, req, -ECONNRESET);
	}

	/* NOTE: normally, the next call to the gadget driver is in
	 * charge of disabling endpoints... usually disconnect().
	 * The exception would be entering a high speed test mode.
	 *
	 * FIXME remove this code ... and retest thoroughly.
	 */
	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		if (ep->ep.desc) {
			/*
			 * NOTE(review): the lock is dropped here, presumably
			 * because usba_ep_disable() takes udc->lock itself;
			 * the list may change while unlocked -- verify.
			 */
			spin_unlock(&udc->lock);
			usba_ep_disable(&ep->ep);
			spin_lock(&udc->lock);
		}
	}
}
/*
 * Look up the endpoint addressed by the wIndex field of a control request.
 * Endpoint number 0 always maps to ep0; otherwise both the endpoint number
 * and the direction bit must match an enabled endpoint.  Returns NULL when
 * no endpoint matches.
 */
static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
{
	struct usba_ep *ep;

	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
		return to_usba_ep(udc->gadget.ep0);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		u8 addr;

		if (!ep->ep.desc)
			continue;

		addr = ep->ep.desc->bEndpointAddress;
		if (((wIndex ^ addr) & USB_DIR_IN) == 0 &&
		    (addr & USB_ENDPOINT_NUMBER_MASK)
				== (wIndex & USB_ENDPOINT_NUMBER_MASK))
			return ep;
	}

	return NULL;
}
/* Called with interrupts disabled and udc->lock held */
/*
 * Stall the endpoint (protocol stall) and rewind the control state
 * machine so the next SETUP packet is accepted normally.
 */
static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
{
	usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
	ep->state = WAIT_FOR_SETUP;
}
/* Return 1 if the endpoint currently has its stall bit set, 0 otherwise. */
static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep)
{
	return !!(usba_ep_readl(ep, STA) & USBA_FORCE_STALL);
}
/*
 * Program the device address field of the CTRL register.  The address
 * is only activated later, when USBA_FADDR_EN is set after the status
 * stage completes.
 */
static inline void set_address(struct usba_udc *udc, unsigned int addr)
{
	u32 ctrl;

	DBG(DBG_BUS, "setting address %u...\n", addr);

	ctrl = USBA_BFINS(DEV_ADDR, addr, usba_readl(udc, CTRL));
	usba_writel(udc, CTRL, ctrl);
}
/*
 * Enter the USB 2.0 electrical test mode previously latched in
 * udc->test_mode by a SET_FEATURE(TEST_MODE) request.  The selector is
 * carried in the high byte of wIndex, hence the 0x0100..0x0400 values.
 * Returns 0 on success or -EINVAL for an unknown selector.
 */
static int do_test_mode(struct usba_udc *udc)
{
	/* The standard USB 2.0 test packet (sent verbatim in Test_Packet). */
	static const char test_packet_buffer[] = {
		/* JKJKJKJK * 9 */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		/* JJKKJJKK * 8 */
		0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
		/* JJJJKKKK * 8 */
		0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
		/* JJJJJJJKKKKKKK * 8 */
		0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		/* JJJJJJJK * 8 */
		0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
		/* {JKKKKKKK * 10}, JK */
		0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
	};
	struct usba_ep *ep;
	struct device *dev = &udc->pdev->dev;
	int test_mode;

	test_mode = udc->test_mode;

	/* Start from a clean slate */
	reset_all_endpoints(udc);

	switch (test_mode) {
	case 0x0100:
		/* Test_J */
		usba_writel(udc, TST, USBA_TST_J_MODE);
		dev_info(dev, "Entering Test_J mode...\n");
		break;
	case 0x0200:
		/* Test_K */
		usba_writel(udc, TST, USBA_TST_K_MODE);
		dev_info(dev, "Entering Test_K mode...\n");
		break;
	case 0x0300:
		/*
		 * Test_SE0_NAK: Force high-speed mode and set up ep0
		 * for Bulk IN transfers
		 */
		ep = &udc->usba_ep[0];
		usba_writel(udc, TST,
				USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
		usba_ep_writel(ep, CFG,
				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
				| USBA_EPT_DIR_IN
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
				| USBA_BF(BK_NUMBER, 1));
		/* The hardware reports whether the configuration "took". */
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			dev_info(dev, "Entering Test_SE0_NAK mode...\n");
		}
		break;
	case 0x0400:
		/* Test_Packet */
		ep = &udc->usba_ep[0];
		usba_ep_writel(ep, CFG,
				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
				| USBA_EPT_DIR_IN
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
				| USBA_BF(BK_NUMBER, 1));
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			dev_err(dev, "Test_Packet: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			usba_writel(udc, TST, USBA_TST_PKT_MODE);
			/* Load the test packet into the FIFO and send it. */
			memcpy_toio(ep->fifo, test_packet_buffer,
					sizeof(test_packet_buffer));
			usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
			dev_info(dev, "Entering Test_Packet mode...\n");
		}
		break;
	default:
		dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode);
		return -EINVAL;
	}

	return 0;
}
/* Avoid overly long expressions */
/* True when the request's wValue selects the REMOTE_WAKEUP feature. */
static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq)
{
	return crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP);
}
/* True when the request's wValue selects the TEST_MODE feature. */
static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq)
{
	return crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE);
}
/* True when the request's wValue selects the ENDPOINT_HALT feature. */
static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
{
	return crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT);
}
/*
 * Handle a SETUP packet received on endpoint 0.
 *
 * The standard requests the controller must answer itself (GET_STATUS,
 * CLEAR_FEATURE, SET_FEATURE, SET_ADDRESS) are processed here; anything
 * else is delegated to the gadget driver's setup() callback with
 * udc->lock temporarily dropped.
 *
 * Called with udc->lock held.  Returns 0 (or the gadget driver's return
 * value) on success, or -1 after stalling ep0 on a malformed request.
 */
static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
		struct usb_ctrlrequest *crq)
{
	int retval = 0;

	switch (crq->bRequest) {
	case USB_REQ_GET_STATUS: {
		u16 status;

		if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
			status = cpu_to_le16(udc->devstatus);
		} else if (crq->bRequestType
				== (USB_DIR_IN | USB_RECIP_INTERFACE)) {
			/* Interfaces have no status bits defined. */
			status = cpu_to_le16(0);
		} else if (crq->bRequestType
				== (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
			struct usba_ep *target;

			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			/* Bit 0 of endpoint status is the halt flag. */
			status = 0;
			if (is_stalled(udc, target))
				status |= cpu_to_le16(1);
		} else
			goto delegate;

		/* Write directly to the FIFO. No queueing is done. */
		if (crq->wLength != cpu_to_le16(sizeof(status)))
			goto stall;
		ep->state = DATA_STAGE_IN;
		__raw_writew(status, ep->fifo);
		usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
		break;
	}

	case USB_REQ_CLEAR_FEATURE: {
		if (crq->bRequestType == USB_RECIP_DEVICE) {
			if (feature_is_dev_remote_wakeup(crq))
				udc->devstatus
					&= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
			else
				/* Can't CLEAR_FEATURE TEST_MODE */
				goto stall;
		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
			struct usba_ep *target;

			if (crq->wLength != cpu_to_le16(0)
					|| !feature_is_ep_halt(crq))
				goto stall;
			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
			/* Clearing a halt resets the data toggle, except on ep0. */
			if (target->index != 0)
				usba_ep_writel(target, CLR_STA,
						USBA_TOGGLE_CLR);
		} else {
			goto delegate;
		}

		send_status(udc, ep);
		break;
	}

	case USB_REQ_SET_FEATURE: {
		if (crq->bRequestType == USB_RECIP_DEVICE) {
			if (feature_is_dev_test_mode(crq)) {
				/*
				 * Test mode is only entered after the status
				 * stage completes; latch the selector now.
				 */
				send_status(udc, ep);
				ep->state = STATUS_STAGE_TEST;
				udc->test_mode = le16_to_cpu(crq->wIndex);
				return 0;
			} else if (feature_is_dev_remote_wakeup(crq)) {
				udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
			} else {
				goto stall;
			}
		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
			struct usba_ep *target;

			if (crq->wLength != cpu_to_le16(0)
					|| !feature_is_ep_halt(crq))
				goto stall;

			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
		} else
			goto delegate;

		send_status(udc, ep);
		break;
	}

	case USB_REQ_SET_ADDRESS:
		if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
			goto delegate;

		/*
		 * The new address only takes effect once FADDR_EN is set
		 * after the status stage (see usba_control_irq()).
		 */
		set_address(udc, le16_to_cpu(crq->wValue));
		send_status(udc, ep);
		ep->state = STATUS_STAGE_ADDR;
		break;

	default:
delegate:
		spin_unlock(&udc->lock);
		retval = udc->driver->setup(&udc->gadget, crq);
		spin_lock(&udc->lock);
	}

	return retval;

stall:
	pr_err("udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
		"halting endpoint...\n",
		ep->ep.name, crq->bRequestType, crq->bRequest,
		le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
		le16_to_cpu(crq->wLength));
	set_protocol_stall(udc, ep);
	return -1;
}
/*
 * Interrupt handler for control endpoints.  Drives the ep0 state machine
 * through the setup, data and status stages of a control transfer.
 * Called from usba_udc_irq() with udc->lock held.
 */
static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

restart:
	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
			ep->ep.name, ep->state, epstatus, epctrl);

	req = NULL;
	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				 struct usba_request, queue);

	/*
	 * NOTE(review): req can be NULL here and the branch below
	 * dereferences it unconditionally.  This appears to rely on the
	 * TX_PK_RDY interrupt only being enabled while a request is
	 * queued -- verify before restructuring.
	 */
	if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		if (req->submitted)
			next_fifo_transaction(ep, req);
		else
			submit_request(ep, req);

		if (req->last_transaction) {
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
		}
		goto restart;
	}
	if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
		usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);

		switch (ep->state) {
		case DATA_STAGE_IN:
			/* IN data done; expect a zero-length OUT status. */
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = STATUS_STAGE_OUT;
			break;
		case STATUS_STAGE_ADDR:
			/* Activate our new address */
			usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
						| USBA_FADDR_EN));
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_IN:
			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
				submit_next_request(ep);
			}
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_TEST:
			/* Status stage of SET_FEATURE(TEST_MODE) is done. */
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			if (do_test_mode(udc))
				set_protocol_stall(udc, ep);
			break;
		default:
			pr_err("udc: %s: TXCOMP: Invalid endpoint state %d, "
				"halting endpoint...\n",
				ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		switch (ep->state) {
		case STATUS_STAGE_OUT:
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
			}
			ep->state = WAIT_FOR_SETUP;
			break;

		case DATA_STAGE_OUT:
			receive_data(ep);
			break;

		default:
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			pr_err("udc: %s: RXRDY: Invalid endpoint state %d, "
				"halting endpoint...\n",
				ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	if (epstatus & USBA_RX_SETUP) {
		union {
			struct usb_ctrlrequest crq;
			unsigned long data[2];
		} crq;
		unsigned int pkt_len;
		int ret;

		if (ep->state != WAIT_FOR_SETUP) {
			/*
			 * Didn't expect a SETUP packet at this
			 * point. Clean up any pending requests (which
			 * may be successful).
			 */
			int status = -EPROTO;

			/*
			 * RXRDY and TXCOMP are dropped when SETUP
			 * packets arrive. Just pretend we received
			 * the status packet.
			 */
			if (ep->state == STATUS_STAGE_OUT
					|| ep->state == STATUS_STAGE_IN) {
				usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
				status = 0;
			}

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, status);
			}
		}

		pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
		DBG(DBG_HW, "Packet length: %u\n", pkt_len);
		/* A SETUP packet is always exactly 8 bytes. */
		if (pkt_len != sizeof(crq)) {
			pr_warning("udc: Invalid packet length %u "
				"(expected %zu)\n", pkt_len, sizeof(crq));
			set_protocol_stall(udc, ep);
			return;
		}

		DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
		memcpy_fromio(crq.data, ep->fifo, sizeof(crq));

		/* Free up one bank in the FIFO so that we can
		 * generate or receive a reply right away. */
		usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);

		/* printk(KERN_DEBUG "setup: %d: %02x.%02x\n",
			ep->state, crq.crq.bRequestType,
			crq.crq.bRequest); */

		if (crq.crq.bRequestType & USB_DIR_IN) {
			/*
			 * The USB 2.0 spec states that "if wLength is
			 * zero, there is no data transfer phase."
			 * However, testusb #14 seems to actually
			 * expect a data phase even if wLength = 0...
			 */
			ep->state = DATA_STAGE_IN;
		} else {
			if (crq.crq.wLength != cpu_to_le16(0))
				ep->state = DATA_STAGE_OUT;
			else
				ep->state = STATUS_STAGE_IN;
		}

		ret = -1;
		if (ep->index == 0)
			ret = handle_ep0_setup(udc, ep, &crq.crq);
		else {
			/* Non-ep0 control endpoints go straight to the driver. */
			spin_unlock(&udc->lock);
			ret = udc->driver->setup(&udc->gadget, &crq.crq);
			spin_lock(&udc->lock);
		}

		DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
			crq.crq.bRequestType, crq.crq.bRequest,
			le16_to_cpu(crq.crq.wLength), ep->state, ret);

		if (ret < 0) {
			/* Let the host know that we failed */
			set_protocol_stall(udc, ep);
		}
	}
}
/*
 * Interrupt handler for non-control (bulk/interrupt/iso) endpoints.
 * Pushes queued IN data into the FIFO and drains received OUT data.
 * Called from usba_udc_irq() with udc->lock held.
 */
static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);

	while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);

		if (list_empty(&ep->queue)) {
			/* Spurious interrupt: nothing to send, so mask it. */
			dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n");
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			return;
		}

		req = list_entry(ep->queue.next, struct usba_request, queue);

		if (req->using_dma) {
			/* Send a zero-length packet */
			usba_ep_writel(ep, SET_STA,
					USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_DIS,
					USBA_TX_PK_RDY);
			list_del_init(&req->queue);
			submit_next_request(ep);
			request_complete(ep, req, 0);
		} else {
			if (req->submitted)
				next_fifo_transaction(ep, req);
			else
				submit_request(ep, req);

			if (req->last_transaction) {
				list_del_init(&req->queue);
				submit_next_request(ep);
				request_complete(ep, req, 0);
			}
		}

		/* Re-sample the registers; more banks may have freed up. */
		epstatus = usba_ep_readl(ep, STA);
		epctrl = usba_ep_readl(ep, CTL);
	}
	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
		receive_data(ep);
		usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
	}
}
/*
 * Interrupt handler for an endpoint's DMA channel: completes the request
 * at the head of the queue when the transfer (or buffer) has finished.
 * Called from usba_udc_irq() with udc->lock held.
 */
static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 status, control, pending;

	status = usba_dma_readl(ep, STATUS);
	control = usba_dma_readl(ep, CONTROL);
#ifdef CONFIG_USB_GADGET_DEBUG_FS
	ep->last_dma_status = status;
#endif
	pending = status & control;
	DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control);

	if (status & USBA_DMA_CH_EN) {
		dev_err(&udc->pdev->dev,
			"DMA_CH_EN is set after transfer is finished!\n");
		dev_err(&udc->pdev->dev,
			"status=%#08x, pending=%#08x, control=%#08x\n",
			status, pending, control);

		/*
		 * try to pretend nothing happened. We might have to
		 * do something here...
		 */
	}

	if (list_empty(&ep->queue))
		/* Might happen if a reset comes along at the right moment */
		return;

	if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) {
		req = list_entry(ep->queue.next, struct usba_request, queue);
		/* Account for the transferred bytes before completing. */
		usba_update_req(ep, req, status);

		list_del_init(&req->queue);
		submit_next_request(ep);
		request_complete(ep, req, 0);
	}
}
/*
 * Top-level controller interrupt handler: dispatches suspend/wakeup/
 * resume/reset events and fans out per-endpoint and per-DMA-channel
 * interrupts to usba_control_irq()/usba_ep_irq()/usba_dma_irq().
 */
static irqreturn_t usba_udc_irq(int irq, void *devid)
{
	struct usba_udc *udc = devid;
	u32 status;
	u32 dma_status;
	u32 ep_status;

	spin_lock(&udc->lock);

	status = usba_readl(udc, INT_STA);
	DBG(DBG_INT, "irq, status=%#08x\n", status);

	if (status & USBA_DET_SUSPEND) {
		toggle_bias(0);
		usba_writel(udc, INT_CLR, USBA_DET_SUSPEND);
		DBG(DBG_BUS, "Suspend detected\n");
		/* Callbacks run unlocked; the driver may sleep or re-enter. */
		if (udc->gadget.speed != USB_SPEED_UNKNOWN
				&& udc->driver && udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (status & USBA_WAKE_UP) {
		toggle_bias(1);
		usba_writel(udc, INT_CLR, USBA_WAKE_UP);
		DBG(DBG_BUS, "Wake Up CPU detected\n");
	}

	if (status & USBA_END_OF_RESUME) {
		usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
		DBG(DBG_BUS, "Resume detected\n");
		if (udc->gadget.speed != USB_SPEED_UNKNOWN
				&& udc->driver && udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	/* DMA interrupts: one bit per endpoint, ep0 has no DMA channel. */
	dma_status = USBA_BFEXT(DMA_INT, status);
	if (dma_status) {
		int i;

		for (i = 1; i < USBA_NR_ENDPOINTS; i++)
			if (dma_status & (1 << i))
				usba_dma_irq(udc, &udc->usba_ep[i]);
	}

	/* Endpoint interrupts: one bit per endpoint, including ep0. */
	ep_status = USBA_BFEXT(EPT_INT, status);
	if (ep_status) {
		int i;

		for (i = 0; i < USBA_NR_ENDPOINTS; i++)
			if (ep_status & (1 << i)) {
				if (ep_is_control(&udc->usba_ep[i]))
					usba_control_irq(udc, &udc->usba_ep[i]);
				else
					usba_ep_irq(udc, &udc->usba_ep[i]);
			}
	}

	if (status & USBA_END_OF_RESET) {
		struct usba_ep *ep0;

		usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
		reset_all_endpoints(udc);

		if (udc->gadget.speed != USB_SPEED_UNKNOWN
				&& udc->driver->disconnect) {
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			spin_unlock(&udc->lock);
			udc->driver->disconnect(&udc->gadget);
			spin_lock(&udc->lock);
		}

		if (status & USBA_HIGH_SPEED)
			udc->gadget.speed = USB_SPEED_HIGH;
		else
			udc->gadget.speed = USB_SPEED_FULL;
		DBG(DBG_BUS, "%s bus reset detected\n",
		    usb_speed_string(udc->gadget.speed));

		/* Re-arm ep0 for the next control transfer after the reset. */
		ep0 = &udc->usba_ep[0];
		ep0->ep.desc = &usba_ep0_desc;
		ep0->state = WAIT_FOR_SETUP;
		usba_ep_writel(ep0, CFG,
				(USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
				| USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
		usba_ep_writel(ep0, CTL_ENB,
				USBA_EPT_ENABLE | USBA_RX_SETUP);
		usba_writel(udc, INT_ENB,
				(usba_readl(udc, INT_ENB)
				| USBA_BF(EPT_INT, 1)
				| USBA_DET_SUSPEND
				| USBA_END_OF_RESUME));

		/*
		 * Unclear why we hit this irregularly, e.g. in usbtest,
		 * but it's clearly harmless...
		 */
		if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
			dev_dbg(&udc->pdev->dev,
				"ODD: EP0 configuration is invalid!\n");
	}

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
/*
 * GPIO interrupt fired when the Vbus sense pin toggles: enables the
 * controller when a host appears, tears it down (and notifies the gadget
 * driver) when the cable is pulled.
 */
static irqreturn_t usba_vbus_irq(int irq, void *devid)
{
	struct usba_udc *udc = devid;
	int vbus;

	/* debounce */
	udelay(10);

	spin_lock(&udc->lock);

	/* May happen if Vbus pin toggles during probe() */
	if (!udc->driver)
		goto out;

	vbus = vbus_is_present(udc);
	if (vbus != udc->vbus_prev) {
		if (vbus) {
			toggle_bias(1);
			usba_writel(udc, CTRL, USBA_ENABLE_MASK);
			/* Wait for the host to issue a bus reset. */
			usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
		} else {
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			reset_all_endpoints(udc);
			toggle_bias(0);
			usba_writel(udc, CTRL, USBA_DISABLE_MASK);
			if (udc->driver->disconnect) {
				spin_unlock(&udc->lock);
				udc->driver->disconnect(&udc->gadget);
				spin_lock(&udc->lock);
			}
		}
		udc->vbus_prev = vbus;
	}

out:
	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
/*
 * udc_start callback: bind a gadget driver, enable the clocks and the
 * Vbus interrupt, and start the controller immediately if Vbus is
 * already present.
 */
static int atmel_usba_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	/* Default to self-powered until the driver says otherwise. */
	udc->devstatus = 1 << USB_DEVICE_SELF_POWERED;
	udc->driver = driver;
	spin_unlock_irqrestore(&udc->lock, flags);

	clk_enable(udc->pclk);
	clk_enable(udc->hclk);

	DBG(DBG_GADGET, "registered driver `%s'\n", driver->driver.name);

	udc->vbus_prev = 0;
	if (gpio_is_valid(udc->vbus_pin))
		enable_irq(gpio_to_irq(udc->vbus_pin));

	/* If Vbus is present, enable the controller and wait for reset */
	spin_lock_irqsave(&udc->lock, flags);
	if (vbus_is_present(udc) && udc->vbus_prev == 0) {
		toggle_bias(1);
		usba_writel(udc, CTRL, USBA_ENABLE_MASK);
		usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/*
 * udc_stop callback: unbind the gadget driver, disable the controller,
 * the Vbus interrupt and the clocks.  Mirrors atmel_usba_start().
 */
static int atmel_usba_stop(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
	unsigned long flags;

	if (gpio_is_valid(udc->vbus_pin))
		disable_irq(gpio_to_irq(udc->vbus_pin));

	spin_lock_irqsave(&udc->lock, flags);
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	reset_all_endpoints(udc);
	spin_unlock_irqrestore(&udc->lock, flags);

	/* This will also disable the DP pullup */
	toggle_bias(0);
	usba_writel(udc, CTRL, USBA_DISABLE_MASK);

	udc->driver = NULL;

	clk_disable(udc->hclk);
	clk_disable(udc->pclk);

	DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);

	return 0;
}
#ifdef CONFIG_OF
/*
 * Parse the controller's device-tree node: read the Vbus GPIO and one
 * child node per endpoint ("reg", "atmel,fifo-size", "atmel,nb-banks",
 * "atmel,can-dma", "atmel,can-isoc", "name").
 *
 * Returns the devm-allocated endpoint array (also stored piecemeal into
 * *udc), or an ERR_PTR on a missing/invalid property.  Endpoint 0 is the
 * control endpoint; the others are chained onto udc->gadget.ep_list.
 */
static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
						    struct usba_udc *udc)
{
	u32 val;
	const char *name;
	enum of_gpio_flags flags;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *pp;
	int i, ret;
	struct usba_ep *eps, *ep;

	udc->num_ep = 0;

	udc->vbus_pin = of_get_named_gpio_flags(np, "atmel,vbus-gpio", 0,
						&flags);
	udc->vbus_pin_inverted = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0;

	/* One child node per endpoint. */
	pp = NULL;
	while ((pp = of_get_next_child(np, pp)))
		udc->num_ep++;

	eps = devm_kzalloc(&pdev->dev, sizeof(struct usba_ep) * udc->num_ep,
			   GFP_KERNEL);
	if (!eps)
		return ERR_PTR(-ENOMEM);

	udc->gadget.ep0 = &eps[0].ep;

	INIT_LIST_HEAD(&eps[0].ep.ep_list);

	pp = NULL;
	i = 0;
	while ((pp = of_get_next_child(np, pp))) {
		ep = &eps[i];

		ret = of_property_read_u32(pp, "reg", &val);
		if (ret) {
			dev_err(&pdev->dev, "of_probe: reg error(%d)\n", ret);
			goto err;
		}
		ep->index = val;

		ret = of_property_read_u32(pp, "atmel,fifo-size", &val);
		if (ret) {
			dev_err(&pdev->dev, "of_probe: fifo-size error(%d)\n", ret);
			goto err;
		}
		ep->fifo_size = val;

		ret = of_property_read_u32(pp, "atmel,nb-banks", &val);
		if (ret) {
			dev_err(&pdev->dev, "of_probe: nb-banks error(%d)\n", ret);
			goto err;
		}
		ep->nr_banks = val;

		ep->can_dma = of_property_read_bool(pp, "atmel,can-dma");
		ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc");

		/*
		 * The result of this lookup was previously used unchecked;
		 * a missing "name" property left ep->ep.name pointing at an
		 * uninitialized stack variable.
		 */
		ret = of_property_read_string(pp, "name", &name);
		if (ret) {
			dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
			goto err;
		}
		ep->ep.name = name;

		/*
		 * NOTE(review): register/FIFO windows are indexed by the
		 * child's position (i), not by ep->index -- these agree only
		 * when the nodes are listed in "reg" order; confirm.
		 */
		ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
		ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
		ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
		ep->ep.ops = &usba_ep_ops;
		ep->ep.maxpacket = ep->fifo_size;
		ep->udc = udc;
		INIT_LIST_HEAD(&ep->queue);

		if (i)
			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

		i++;
	}

	return eps;
err:
	return ERR_PTR(ret);
}
#else
/* Stub for kernels built without device-tree support (CONFIG_OF unset). */
static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
						    struct usba_udc *udc)
{
	return ERR_PTR(-ENOSYS);
}
#endif
/*
 * Board-file (non-DT) counterpart of atmel_udc_of_init(): build the
 * endpoint array from the usba_platform_data attached to the platform
 * device.  Returns the devm-allocated array or an ERR_PTR.
 */
static struct usba_ep * usba_udc_pdata(struct platform_device *pdev,
						 struct usba_udc *udc)
{
	struct usba_platform_data *pdata = pdev->dev.platform_data;
	struct usba_ep *eps;
	int i;

	if (!pdata)
		return ERR_PTR(-ENXIO);

	eps = devm_kzalloc(&pdev->dev, sizeof(struct usba_ep) * pdata->num_ep,
			   GFP_KERNEL);
	if (!eps)
		return ERR_PTR(-ENOMEM);

	udc->gadget.ep0 = &eps[0].ep;

	udc->vbus_pin = pdata->vbus_pin;
	udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
	udc->num_ep = pdata->num_ep;

	INIT_LIST_HEAD(&eps[0].ep.ep_list);

	for (i = 0; i < pdata->num_ep; i++) {
		struct usba_ep *ep = &eps[i];

		ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
		ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
		ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
		ep->ep.ops = &usba_ep_ops;
		ep->ep.name = pdata->ep[i].name;
		ep->fifo_size = ep->ep.maxpacket = pdata->ep[i].fifo_size;
		ep->udc = udc;
		INIT_LIST_HEAD(&ep->queue);
		ep->nr_banks = pdata->ep[i].nr_banks;
		ep->index = pdata->ep[i].index;
		ep->can_dma = pdata->ep[i].can_dma;
		ep->can_isoc = pdata->ep[i].can_isoc;

		/* ep0 is the control endpoint, not on the general ep list. */
		if (i)
			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
	}

	return eps;
}
/*
 * Probe: map the register and FIFO windows, get the clocks, build the
 * endpoint array (from DT or platform data), request the controller and
 * optional Vbus IRQs, and register with the UDC core.  Errors unwind in
 * reverse order via the goto ladder at the bottom.
 */
static int __init usba_udc_probe(struct platform_device *pdev)
{
	struct resource *regs, *fifo;
	struct clk *pclk, *hclk;
	struct usba_udc *udc;
	int irq, ret, i;

	udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	udc->gadget = usba_gadget_template;
	INIT_LIST_HEAD(&udc->gadget.ep_list);

	regs = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
	fifo = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
	if (!regs || !fifo)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	pclk = clk_get(&pdev->dev, "pclk");
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);
	hclk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(hclk)) {
		ret = PTR_ERR(hclk);
		goto err_get_hclk;
	}

	spin_lock_init(&udc->lock);
	udc->pdev = pdev;
	udc->pclk = pclk;
	udc->hclk = hclk;
	/* Assume no Vbus pin until DT/pdata parsing says otherwise. */
	udc->vbus_pin = -ENODEV;

	ret = -ENOMEM;
	udc->regs = ioremap(regs->start, resource_size(regs));
	if (!udc->regs) {
		dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
		goto err_map_regs;
	}
	dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
		 (unsigned long)regs->start, udc->regs);
	udc->fifo = ioremap(fifo->start, resource_size(fifo));
	if (!udc->fifo) {
		dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
		goto err_map_fifo;
	}
	dev_info(&pdev->dev, "FIFO at 0x%08lx mapped at %p\n",
		 (unsigned long)fifo->start, udc->fifo);

	platform_set_drvdata(pdev, udc);

	/* Make sure we start from a clean slate */
	clk_enable(pclk);
	toggle_bias(0);
	usba_writel(udc, CTRL, USBA_DISABLE_MASK);
	clk_disable(pclk);

	/* DT and board-file probing are mutually exclusive. */
	if (pdev->dev.of_node)
		udc->usba_ep = atmel_udc_of_init(pdev, udc);
	else
		udc->usba_ep = usba_udc_pdata(pdev, udc);

	if (IS_ERR(udc->usba_ep)) {
		ret = PTR_ERR(udc->usba_ep);
		goto err_alloc_ep;
	}

	ret = request_irq(irq, usba_udc_irq, 0, "atmel_usba_udc", udc);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
			irq, ret);
		goto err_request_irq;
	}
	udc->irq = irq;

	if (gpio_is_valid(udc->vbus_pin)) {
		if (!devm_gpio_request(&pdev->dev, udc->vbus_pin, "atmel_usba_udc")) {
			ret = request_irq(gpio_to_irq(udc->vbus_pin),
					usba_vbus_irq, 0,
					"atmel_usba_udc", udc);
			if (ret) {
				/* Not fatal: fall back to "Vbus always on". */
				udc->vbus_pin = -ENODEV;
				dev_warn(&udc->pdev->dev,
					 "failed to request vbus irq; "
					 "assuming always on\n");
			} else {
				/* Enabled later by atmel_usba_start(). */
				disable_irq(gpio_to_irq(udc->vbus_pin));
			}
		} else {
			/* gpio_request fail so use -EINVAL for gpio_is_valid */
			udc->vbus_pin = -EINVAL;
		}
	}

	ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
	if (ret)
		goto err_add_udc;

	usba_init_debugfs(udc);
	for (i = 1; i < udc->num_ep; i++)
		usba_ep_init_debugfs(udc, &udc->usba_ep[i]);

	return 0;

err_add_udc:
	if (gpio_is_valid(udc->vbus_pin))
		free_irq(gpio_to_irq(udc->vbus_pin), udc);

	free_irq(irq, udc);
err_request_irq:
err_alloc_ep:
	iounmap(udc->fifo);
err_map_fifo:
	iounmap(udc->regs);
err_map_regs:
	clk_put(hclk);
err_get_hclk:
	clk_put(pclk);

	return ret;
}
/*
 * Remove: tear everything down in the reverse order of probe
 * (UDC registration, debugfs, IRQs, mappings, clocks).
 */
static int __exit usba_udc_remove(struct platform_device *pdev)
{
	struct usba_udc *udc;
	int i;

	udc = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&udc->gadget);

	for (i = 1; i < udc->num_ep; i++)
		usba_ep_cleanup_debugfs(&udc->usba_ep[i]);
	usba_cleanup_debugfs(udc);

	if (gpio_is_valid(udc->vbus_pin)) {
		free_irq(gpio_to_irq(udc->vbus_pin), udc);
	}

	free_irq(udc->irq, udc);
	iounmap(udc->fifo);
	iounmap(udc->regs);
	clk_put(udc->hclk);
	clk_put(udc->pclk);

	return 0;
}
#if defined(CONFIG_OF)
/* Device-tree compatible strings handled by this driver. */
static const struct of_device_id atmel_udc_dt_ids[] = {
	{ .compatible = "atmel,at91sam9rl-udc" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids);
#endif
/*
 * Platform driver.  .probe is not set here: usba_udc_probe() is __init
 * and is passed separately via module_platform_driver_probe() below.
 */
static struct platform_driver udc_driver = {
	.remove		= __exit_p(usba_udc_remove),
	.driver		= {
		.name		= "atmel_usba_udc",
		.owner		= THIS_MODULE,
		.of_match_table	= of_match_ptr(atmel_udc_dt_ids),
	},
};
module_platform_driver_probe(udc_driver, usba_udc_probe);
MODULE_DESCRIPTION("Atmel USBA UDC driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_usba_udc");
| gpl-2.0 |
zcop/lg_f160s_custom_kernel | drivers/mfd/pm8038-core.c | 66 | 25539 | /*
* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/msm_ssbi.h>
#include <linux/mfd/core.h>
#include <linux/mfd/pm8xxx/pm8038.h>
#include <linux/mfd/pm8xxx/pm8921.h>
#include <linux/mfd/pm8xxx/core.h>
#include <linux/mfd/pm8xxx/regulator.h>
#define REG_HWREV 0x002 /* PMIC4 revision */
#define REG_HWREV_2 0x0E8 /* PMIC4 revision 2 */
#define REG_MPP_BASE 0x050
#define REG_RTC_BASE 0x11D
#define REG_IRQ_BASE 0x1BB
#define REG_SPK_BASE 0x253
#define REG_SPK_REGISTERS 3
#define REG_TEMP_ALARM_CTRL 0x01B
#define REG_TEMP_ALARM_PWM 0x09B
#define PM8038_VERSION_MASK 0xFFF0
#define PM8038_VERSION_VALUE 0x09F0
#define PM8038_REVISION_MASK 0x000F
#define REG_PM8038_PON_CNTRL_3 0x01D
#define SINGLE_IRQ_RESOURCE(_name, _irq) \
{ \
.name = _name, \
.start = _irq, \
.end = _irq, \
.flags = IORESOURCE_IRQ, \
}
/* Per-chip state for the PM8038 PMIC core driver. */
struct pm8038 {
	struct device					*dev;		/* our device */
	struct pm_irq_chip				*irq_chip;	/* PMIC irq chip */
	struct mfd_cell					*mfd_regulators; /* regulator subdevice cells */
	struct pm8xxx_regulator_core_platform_data	*regulator_cdata;
	u32						rev_registers;	/* raw HWREV/HWREV_2 contents */
	u8						restart_reason;	/* cached PON restart reason */
};
/* Read a single byte from the PMIC over SSBI. */
static int pm8038_readb(const struct device *dev, u16 addr, u8 *val)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8038 *pmic = drvdata->pm_chip_data;

	return msm_ssbi_read(pmic->dev->parent, addr, val, 1);
}
/* Write a single byte to the PMIC over SSBI. */
static int pm8038_writeb(const struct device *dev, u16 addr, u8 val)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8038 *pmic = drvdata->pm_chip_data;

	return msm_ssbi_write(pmic->dev->parent, addr, &val, 1);
}
/* Read cnt consecutive bytes from the PMIC over SSBI. */
static int pm8038_read_buf(const struct device *dev, u16 addr, u8 *buf,
								int cnt)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8038 *pmic = drvdata->pm_chip_data;

	return msm_ssbi_read(pmic->dev->parent, addr, buf, cnt);
}
/* Write cnt consecutive bytes to the PMIC over SSBI. */
static int pm8038_write_buf(const struct device *dev, u16 addr, u8 *buf,
								int cnt)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8038 *pmic = drvdata->pm_chip_data;

	return msm_ssbi_write(pmic->dev->parent, addr, buf, cnt);
}
/* Query the latched status of a PMIC interrupt line. */
static int pm8038_read_irq_stat(const struct device *dev, int irq)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8038 *pmic = drvdata->pm_chip_data;

	return pm8xxx_get_irq_stat(pmic->irq_chip, irq);
}
/*
 * Identify the chip from the cached revision registers.  Returns
 * PM8XXX_VERSION_8038 when the version field matches, -ENODEV otherwise.
 */
static enum pm8xxx_version pm8038_get_version(const struct device *dev)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8038 *pmic = drvdata->pm_chip_data;

	if ((pmic->rev_registers & PM8038_VERSION_MASK) == PM8038_VERSION_VALUE)
		return PM8XXX_VERSION_8038;

	return -ENODEV;
}
/* Extract the silicon revision from the cached revision registers. */
static int pm8038_get_revision(const struct device *dev)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8038 *pmic = drvdata->pm_chip_data;

	return pmic->rev_registers & PM8038_REVISION_MASK;
}
/* Return the restart reason cached at probe time. */
static u8 pm8038_restart_reason(const struct device *dev)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8038 *pmic = drvdata->pm_chip_data;

	return pmic->restart_reason;
}
/* pm8xxx core callbacks, attached to the device as drvdata. */
static struct pm8xxx_drvdata pm8038_drvdata = {
	.pmic_readb		= pm8038_readb,
	.pmic_writeb		= pm8038_writeb,
	.pmic_read_buf		= pm8038_read_buf,
	.pmic_write_buf		= pm8038_write_buf,
	.pmic_read_irq_stat	= pm8038_read_irq_stat,
	.pmic_get_version	= pm8038_get_version,
	.pmic_get_revision	= pm8038_get_revision,
	.pmic_restart_reason	= pm8038_restart_reason,
};
/*
 * Resource tables and mfd_cell descriptors for every pm8038 sub-function.
 * Cells are registered selectively by pm8038_add_subdevices() depending
 * on which platform data the board supplies.  __devinitconst/__devinitdata
 * data is discarded after init when hotplug support is compiled out.
 */

/* GPIO controller: one IRQ per GPIO, taken from a contiguous block. */
static const struct resource gpio_cell_resources[] __devinitconst = {
[0] = {
.start = PM8038_IRQ_BLOCK_BIT(PM8038_GPIO_BLOCK_START, 0),
.end = PM8038_IRQ_BLOCK_BIT(PM8038_GPIO_BLOCK_START, 0)
+ PM8038_NR_GPIOS - 1,
.flags = IORESOURCE_IRQ,
},
};
static struct mfd_cell gpio_cell __devinitdata = {
.name = PM8XXX_GPIO_DEV_NAME,
.id = -1,
.resources = gpio_cell_resources,
.num_resources = ARRAY_SIZE(gpio_cell_resources),
};
/* ADC: end-of-conversion plus battery-temperature threshold IRQs. */
static const struct resource adc_cell_resources[] __devinitconst = {
SINGLE_IRQ_RESOURCE(NULL, PM8038_ADC_EOC_USR_IRQ),
SINGLE_IRQ_RESOURCE(NULL, PM8038_ADC_BATT_TEMP_WARM_IRQ),
SINGLE_IRQ_RESOURCE(NULL, PM8038_ADC_BATT_TEMP_COLD_IRQ),
};
static struct mfd_cell adc_cell __devinitdata = {
.name = PM8XXX_ADC_DEV_NAME,
.id = -1,
.resources = adc_cell_resources,
.num_resources = ARRAY_SIZE(adc_cell_resources),
};
/*
 * Charger: named IRQ resources looked up by the pm8921-charger driver.
 * NOTE(review): these use PM8921_* IRQ indices on a PM8038 chip —
 * presumably the interrupt map is shared between the two parts; confirm
 * against the pm8038 IRQ header.
 */
static const struct resource charger_cell_resources[] __devinitconst = {
SINGLE_IRQ_RESOURCE("USBIN_VALID_IRQ", PM8921_USBIN_VALID_IRQ),
SINGLE_IRQ_RESOURCE("USBIN_OV_IRQ", PM8921_USBIN_OV_IRQ),
SINGLE_IRQ_RESOURCE("BATT_INSERTED_IRQ", PM8921_BATT_INSERTED_IRQ),
SINGLE_IRQ_RESOURCE("VBATDET_LOW_IRQ", PM8921_VBATDET_LOW_IRQ),
SINGLE_IRQ_RESOURCE("USBIN_UV_IRQ", PM8921_USBIN_UV_IRQ),
SINGLE_IRQ_RESOURCE("VBAT_OV_IRQ", PM8921_VBAT_OV_IRQ),
SINGLE_IRQ_RESOURCE("CHGWDOG_IRQ", PM8921_CHGWDOG_IRQ),
SINGLE_IRQ_RESOURCE("VCP_IRQ", PM8921_VCP_IRQ),
SINGLE_IRQ_RESOURCE("ATCDONE_IRQ", PM8921_ATCDONE_IRQ),
SINGLE_IRQ_RESOURCE("ATCFAIL_IRQ", PM8921_ATCFAIL_IRQ),
SINGLE_IRQ_RESOURCE("CHGDONE_IRQ", PM8921_CHGDONE_IRQ),
SINGLE_IRQ_RESOURCE("CHGFAIL_IRQ", PM8921_CHGFAIL_IRQ),
SINGLE_IRQ_RESOURCE("CHGSTATE_IRQ", PM8921_CHGSTATE_IRQ),
SINGLE_IRQ_RESOURCE("LOOP_CHANGE_IRQ", PM8921_LOOP_CHANGE_IRQ),
SINGLE_IRQ_RESOURCE("FASTCHG_IRQ", PM8921_FASTCHG_IRQ),
SINGLE_IRQ_RESOURCE("TRKLCHG_IRQ", PM8921_TRKLCHG_IRQ),
SINGLE_IRQ_RESOURCE("BATT_REMOVED_IRQ", PM8921_BATT_REMOVED_IRQ),
SINGLE_IRQ_RESOURCE("BATTTEMP_HOT_IRQ", PM8921_BATTTEMP_HOT_IRQ),
SINGLE_IRQ_RESOURCE("CHGHOT_IRQ", PM8921_CHGHOT_IRQ),
SINGLE_IRQ_RESOURCE("BATTTEMP_COLD_IRQ", PM8921_BATTTEMP_COLD_IRQ),
SINGLE_IRQ_RESOURCE("CHG_GONE_IRQ", PM8921_CHG_GONE_IRQ),
SINGLE_IRQ_RESOURCE("BAT_TEMP_OK_IRQ", PM8921_BAT_TEMP_OK_IRQ),
SINGLE_IRQ_RESOURCE("COARSE_DET_LOW_IRQ", PM8921_COARSE_DET_LOW_IRQ),
SINGLE_IRQ_RESOURCE("VDD_LOOP_IRQ", PM8921_VDD_LOOP_IRQ),
SINGLE_IRQ_RESOURCE("VREG_OV_IRQ", PM8921_VREG_OV_IRQ),
SINGLE_IRQ_RESOURCE("VBATDET_IRQ", PM8921_VBATDET_IRQ),
SINGLE_IRQ_RESOURCE("BATFET_IRQ", PM8921_BATFET_IRQ),
SINGLE_IRQ_RESOURCE("PSI_IRQ", PM8921_PSI_IRQ),
SINGLE_IRQ_RESOURCE("DCIN_VALID_IRQ", PM8921_DCIN_VALID_IRQ),
SINGLE_IRQ_RESOURCE("DCIN_OV_IRQ", PM8921_DCIN_OV_IRQ),
SINGLE_IRQ_RESOURCE("DCIN_UV_IRQ", PM8921_DCIN_UV_IRQ),
};
/* Battery monitoring system (coulomb counter / OCV) IRQs. */
static const struct resource bms_cell_resources[] __devinitconst = {
SINGLE_IRQ_RESOURCE("PM8921_BMS_SBI_WRITE_OK", PM8921_BMS_SBI_WRITE_OK),
SINGLE_IRQ_RESOURCE("PM8921_BMS_CC_THR", PM8921_BMS_CC_THR),
SINGLE_IRQ_RESOURCE("PM8921_BMS_VSENSE_THR", PM8921_BMS_VSENSE_THR),
SINGLE_IRQ_RESOURCE("PM8921_BMS_VSENSE_FOR_R", PM8921_BMS_VSENSE_FOR_R),
SINGLE_IRQ_RESOURCE("PM8921_BMS_OCV_FOR_R", PM8921_BMS_OCV_FOR_R),
SINGLE_IRQ_RESOURCE("PM8921_BMS_GOOD_OCV", PM8921_BMS_GOOD_OCV),
SINGLE_IRQ_RESOURCE("PM8921_BMS_VSENSE_AVG", PM8921_BMS_VSENSE_AVG),
};
static struct mfd_cell charger_cell __devinitdata = {
.name = PM8921_CHARGER_DEV_NAME,
.id = -1,
.resources = charger_cell_resources,
.num_resources = ARRAY_SIZE(charger_cell_resources),
};
static struct mfd_cell bms_cell __devinitdata = {
.name = PM8921_BMS_DEV_NAME,
.id = -1,
.resources = bms_cell_resources,
.num_resources = ARRAY_SIZE(bms_cell_resources),
};
/* Multi-purpose pins: one IRQ per MPP from a contiguous block. */
static const struct resource mpp_cell_resources[] __devinitconst = {
{
.start = PM8038_IRQ_BLOCK_BIT(PM8038_MPP_BLOCK_START, 0),
.end = PM8038_IRQ_BLOCK_BIT(PM8038_MPP_BLOCK_START, 0)
+ PM8038_NR_MPPS - 1,
.flags = IORESOURCE_IRQ,
},
};
/* NOTE(review): id is 1 here while every other cell uses -1 — verify
 * this matches the pm8xxx-mpp driver's expected device naming. */
static struct mfd_cell mpp_cell __devinitdata = {
.name = PM8XXX_MPP_DEV_NAME,
.id = 1,
.resources = mpp_cell_resources,
.num_resources = ARRAY_SIZE(mpp_cell_resources),
};
/* RTC: alarm IRQ plus the register base passed as an IO resource. */
static const struct resource rtc_cell_resources[] __devinitconst = {
[0] = SINGLE_IRQ_RESOURCE(NULL, PM8038_RTC_ALARM_IRQ),
[1] = {
.name = "pmic_rtc_base",
.start = REG_RTC_BASE,
.end = REG_RTC_BASE,
.flags = IORESOURCE_IO,
},
};
static struct mfd_cell rtc_cell __devinitdata = {
.name = PM8XXX_RTC_DEV_NAME,
.id = -1,
.resources = rtc_cell_resources,
.num_resources = ARRAY_SIZE(rtc_cell_resources),
};
/* Power key: release IRQ first, then press IRQ. */
static const struct resource resources_pwrkey[] __devinitconst = {
SINGLE_IRQ_RESOURCE(NULL, PM8038_PWRKEY_REL_IRQ),
SINGLE_IRQ_RESOURCE(NULL, PM8038_PWRKEY_PRESS_IRQ),
};
static struct mfd_cell pwrkey_cell __devinitdata = {
.name = PM8XXX_PWRKEY_DEV_NAME,
.id = -1,
.num_resources = ARRAY_SIZE(resources_pwrkey),
.resources = resources_pwrkey,
};
/* Cells with no resources; platform data is attached at registration. */
static struct mfd_cell pwm_cell __devinitdata = {
.name = PM8XXX_PWM_DEV_NAME,
.id = -1,
};
static struct mfd_cell misc_cell __devinitdata = {
.name = PM8XXX_MISC_DEV_NAME,
.id = -1,
};
static struct mfd_cell leds_cell __devinitdata = {
.name = PM8XXX_LEDS_DEV_NAME,
.id = -1,
};
/* Speaker amplifier: register window passed as an IO resource. */
static const struct resource resources_spk[] __devinitconst = {
[0] = {
.name = PM8XXX_SPK_DEV_NAME,
.start = REG_SPK_BASE,
.end = REG_SPK_BASE + REG_SPK_REGISTERS,
.flags = IORESOURCE_IO,
},
};
static struct mfd_cell spk_cell __devinitdata = {
.name = PM8XXX_SPK_DEV_NAME,
.id = -1,
.num_resources = ARRAY_SIZE(resources_spk),
.resources = resources_spk,
};
/* Debugfs helper: platform_data is the literal name string itself. */
static struct mfd_cell debugfs_cell __devinitdata = {
.name = "pm8xxx-debug",
.id = 0,
.platform_data = "pm8038-dbg",
.pdata_size = sizeof("pm8038-dbg"),
};
/* Thermal alarm: die-temperature monitor with static core data below. */
static const struct resource thermal_alarm_cell_resources[] __devinitconst = {
SINGLE_IRQ_RESOURCE("pm8038_tempstat_irq", PM8038_TEMPSTAT_IRQ),
SINGLE_IRQ_RESOURCE("pm8038_overtemp_irq", PM8038_OVERTEMP_IRQ),
};
static struct pm8xxx_tm_core_data thermal_alarm_cdata = {
.adc_channel = CHANNEL_DIE_TEMP,
.adc_type = PM8XXX_TM_ADC_PM8XXX_ADC,
.reg_addr_temp_alarm_ctrl = REG_TEMP_ALARM_CTRL,
.reg_addr_temp_alarm_pwm = REG_TEMP_ALARM_PWM,
.tm_name = "pm8038_tz",
.irq_name_temp_stat = "pm8038_tempstat_irq",
.irq_name_over_temp = "pm8038_overtemp_irq",
};
static struct mfd_cell thermal_alarm_cell __devinitdata = {
.name = PM8XXX_TM_DEV_NAME,
.id = -1,
.resources = thermal_alarm_cell_resources,
.num_resources = ARRAY_SIZE(thermal_alarm_cell_resources),
.platform_data = &thermal_alarm_cdata,
.pdata_size = sizeof(struct pm8xxx_tm_core_data),
};
/* Coulomb-counter ADC: end-of-conversion IRQ only. */
static const struct resource ccadc_cell_resources[] __devinitconst = {
SINGLE_IRQ_RESOURCE("PM8921_BMS_CCADC_EOC", PM8921_BMS_CCADC_EOC),
};
static struct mfd_cell ccadc_cell __devinitdata = {
.name = PM8XXX_CCADC_DEV_NAME,
.id = -1,
.resources = ccadc_cell_resources,
.num_resources = ARRAY_SIZE(ccadc_cell_resources),
};
/*
 * Static descriptor table for every PM8038 regulator.  Register
 * addresses are SSBI offsets; the trailing argument selects the
 * regulator's HPM minimum-load class.  match_regulator() looks entries
 * up by their rdesc / rdesc_pc names.  Note l13 and l25 are absent —
 * presumably not present on this part; confirm against the datasheet.
 */
static struct pm8xxx_vreg regulator_data[] = {
/* name pc_name ctrl test hpm_min */
NLDO1200("8038_l1", 0x0AE, 0x0AF, LDO_1200),
NLDO("8038_l2", "8038_l2_pc", 0x0B0, 0x0B1, LDO_150),
PLDO("8038_l3", "8038_l3_pc", 0x0B2, 0x0B3, LDO_50),
PLDO("8038_l4", "8038_l4_pc", 0x0B4, 0x0B5, LDO_50),
PLDO("8038_l5", "8038_l5_pc", 0x0B6, 0x0B7, LDO_600),
PLDO("8038_l6", "8038_l6_pc", 0x0B8, 0x0B9, LDO_600),
PLDO("8038_l7", "8038_l7_pc", 0x0BA, 0x0BB, LDO_600),
PLDO("8038_l8", "8038_l8_pc", 0x0BC, 0x0BD, LDO_300),
PLDO("8038_l9", "8038_l9_pc", 0x0BE, 0x0BF, LDO_300),
PLDO("8038_l10", "8038_l10_pc", 0x0C0, 0x0C1, LDO_600),
PLDO("8038_l11", "8038_l11_pc", 0x0C2, 0x0C3, LDO_600),
NLDO("8038_l12", "8038_l12_pc", 0x0C4, 0x0C5, LDO_300),
PLDO("8038_l14", "8038_l14_pc", 0x0C8, 0x0C9, LDO_50),
PLDO("8038_l15", "8038_l15_pc", 0x0CA, 0x0CB, LDO_150),
NLDO1200("8038_l16", 0x0CC, 0x0CD, LDO_1200),
PLDO("8038_l17", "8038_l17_pc", 0x0CE, 0x0CF, LDO_150),
PLDO("8038_l18", "8038_l18_pc", 0x0D0, 0x0D1, LDO_50),
NLDO1200("8038_l19", 0x0D2, 0x0D3, LDO_1200),
NLDO1200("8038_l20", 0x0D4, 0x0D5, LDO_1200),
PLDO("8038_l21", "8038_l21_pc", 0x0D6, 0x0D7, LDO_150),
PLDO("8038_l22", "8038_l22_pc", 0x0D8, 0x0D9, LDO_50),
PLDO("8038_l23", "8038_l23_pc", 0x0DA, 0x0DB, LDO_50),
NLDO1200("8038_l24", 0x0DC, 0x0DD, LDO_1200),
NLDO("8038_l26", "8038_l26_pc", 0x0E0, 0x0E1, LDO_150),
NLDO1200("8038_l27", 0x0E2, 0x0E3, LDO_1200),
/* name pc_name ctrl test2 clk sleep hpm_min */
SMPS("8038_s1", "8038_s1_pc", 0x1E0, 0x1E5, 0x009, 0x1E2, SMPS_1500),
SMPS("8038_s2", "8038_s2_pc", 0x1D8, 0x1DD, 0x00A, 0x1DA, SMPS_1500),
SMPS("8038_s3", "8038_s3_pc", 0x1D0, 0x1D5, 0x00B, 0x1D2, SMPS_1500),
SMPS("8038_s4", "8038_s4_pc", 0x1E8, 0x1ED, 0x00C, 0x1EA, SMPS_1500),
/* name ctrl fts_cnfg1 pfm pwr_cnfg hpm_min */
FTSMPS("8038_s5", 0x025, 0x02E, 0x026, 0x032, SMPS_2000),
FTSMPS("8038_s6", 0x036, 0x03F, 0x037, 0x043, SMPS_2000),
/* name pc_name ctrl test */
VS("8038_lvs1", "8038_lvs1_pc", 0x060, 0x061),
VS("8038_lvs2", "8038_lvs2_pc", 0x062, 0x063),
};
#define MAX_NAME_COMPARISON_LEN 32

/*
 * match_regulator - bind @core_data to the regulator_data[] entry whose
 * regular (rdesc) or pin-controlled (rdesc_pc) descriptor name matches
 * @name, setting is_pin_controlled accordingly.
 *
 * Fix: "&regulator_data[i]" had been corrupted into a mis-encoded
 * "&reg;" HTML-entity sequence, which does not compile.
 *
 * Returns 1 when a match was found, 0 (with an error log) otherwise.
 */
static int __devinit match_regulator(
	struct pm8xxx_regulator_core_platform_data *core_data, const char *name)
{
	int found = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(regulator_data); i++) {
		if (regulator_data[i].rdesc.name
		    && strncmp(regulator_data[i].rdesc.name, name,
			       MAX_NAME_COMPARISON_LEN) == 0) {
			core_data->is_pin_controlled = false;
			core_data->vreg = &regulator_data[i];
			found = 1;
			break;
		} else if (regulator_data[i].rdesc_pc.name
			   && strncmp(regulator_data[i].rdesc_pc.name, name,
				      MAX_NAME_COMPARISON_LEN) == 0) {
			core_data->is_pin_controlled = true;
			core_data->vreg = &regulator_data[i];
			found = 1;
			break;
		}
	}

	if (!found)
		pr_err("could not find a match for regulator: %s\n", name);

	return found;
}
/*
 * pm8038_add_regulators - register one mfd cell per board regulator.
 *
 * Allocates an mfd_cell and a core-platform-data slot per regulator in
 * @pdata, binds each to its regulator_data[] descriptor via
 * match_regulator(), and registers the whole set with mfd_add_devices().
 * On success the allocations are saved in @pmic so pm8038_remove() can
 * free them; on failure everything (including the pc_lock mutexes) is
 * unwound here.
 *
 * Fixes vs. original:
 *  - "&regulator_data[i]" had been corrupted into a mis-encoded
 *    "&reg;" HTML-entity sequence that does not compile.
 *  - sizeof() products are size_t; print them with %zu instead of %d.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit
pm8038_add_regulators(const struct pm8038_platform_data *pdata,
		      struct pm8038 *pmic, int irq_base)
{
	int ret = 0;
	struct mfd_cell *mfd_regulators;
	struct pm8xxx_regulator_core_platform_data *cdata;
	int i;

	/* Add one device for each regulator used by the board. */
	mfd_regulators = kzalloc(sizeof(struct mfd_cell)
				 * (pdata->num_regulators), GFP_KERNEL);
	if (!mfd_regulators) {
		pr_err("Cannot allocate %zu bytes for pm8038 regulator "
			"mfd cells\n", sizeof(struct mfd_cell)
				* (pdata->num_regulators));
		return -ENOMEM;
	}
	cdata = kzalloc(sizeof(struct pm8xxx_regulator_core_platform_data)
			* pdata->num_regulators, GFP_KERNEL);
	if (!cdata) {
		pr_err("Cannot allocate %zu bytes for pm8038 regulator "
			"core data\n", pdata->num_regulators
			* sizeof(struct pm8xxx_regulator_core_platform_data));
		kfree(mfd_regulators);
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(regulator_data); i++)
		mutex_init(&regulator_data[i].pc_lock);

	for (i = 0; i < pdata->num_regulators; i++) {
		if (!pdata->regulator_pdatas[i].init_data.constraints.name) {
			pr_err("name missing for regulator %d\n", i);
			ret = -EINVAL;
			goto bail;
		}
		if (!match_regulator(&cdata[i],
		     pdata->regulator_pdatas[i].init_data.constraints.name)) {
			ret = -ENODEV;
			goto bail;
		}
		cdata[i].pdata = &(pdata->regulator_pdatas[i]);
		mfd_regulators[i].name = PM8XXX_REGULATOR_DEV_NAME;
		mfd_regulators[i].id = cdata[i].pdata->id;
		mfd_regulators[i].platform_data = &cdata[i];
		mfd_regulators[i].pdata_size =
			sizeof(struct pm8xxx_regulator_core_platform_data);
	}

	ret = mfd_add_devices(pmic->dev, 0, mfd_regulators,
			      pdata->num_regulators, NULL, irq_base);
	if (ret)
		goto bail;

	pmic->mfd_regulators = mfd_regulators;
	pmic->regulator_cdata = cdata;
	return ret;

bail:
	for (i = 0; i < ARRAY_SIZE(regulator_data); i++)
		mutex_destroy(&regulator_data[i].pc_lock);
	kfree(mfd_regulators);
	kfree(cdata);
	return ret;
}
/*
 * pm8038_add_subdevices - register every sub-function the board enabled.
 *
 * The IRQ chip is set up first so that irq_base is valid for all later
 * cells.  Optional cells (gpio, mpp, rtc, pwrkey, misc, leds, spk, adc,
 * charger, bms, ccadc, regulators) are registered only when matching
 * platform data is present; pwm, debugfs and thermal-alarm cells are
 * registered unconditionally.  On error only the IRQ chip is torn down
 * here — removal of already-added cells is left to the caller
 * (pm8038_probe() calls mfd_remove_devices() on its error path).
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit
pm8038_add_subdevices(const struct pm8038_platform_data *pdata,
struct pm8038 *pmic)
{
int ret = 0, irq_base = 0;
struct pm_irq_chip *irq_chip;
/* Interrupt controller must come first; its irq_base seeds all cells. */
if (pdata->irq_pdata) {
pdata->irq_pdata->irq_cdata.nirqs = PM8038_NR_IRQS;
pdata->irq_pdata->irq_cdata.base_addr = REG_IRQ_BASE;
irq_base = pdata->irq_pdata->irq_base;
irq_chip = pm8xxx_irq_init(pmic->dev, pdata->irq_pdata);
if (IS_ERR(irq_chip)) {
pr_err("Failed to init interrupts ret=%ld\n",
PTR_ERR(irq_chip));
return PTR_ERR(irq_chip);
}
pmic->irq_chip = irq_chip;
}
if (pdata->gpio_pdata) {
pdata->gpio_pdata->gpio_cdata.ngpios = PM8038_NR_GPIOS;
gpio_cell.platform_data = pdata->gpio_pdata;
gpio_cell.pdata_size = sizeof(struct pm8xxx_gpio_platform_data);
ret = mfd_add_devices(pmic->dev, 0, &gpio_cell, 1,
NULL, irq_base);
if (ret) {
pr_err("Failed to add gpio subdevice ret=%d\n", ret);
goto bail;
}
}
if (pdata->mpp_pdata) {
pdata->mpp_pdata->core_data.nmpps = PM8038_NR_MPPS;
pdata->mpp_pdata->core_data.base_addr = REG_MPP_BASE;
mpp_cell.platform_data = pdata->mpp_pdata;
mpp_cell.pdata_size = sizeof(struct pm8xxx_mpp_platform_data);
ret = mfd_add_devices(pmic->dev, 0, &mpp_cell, 1, NULL,
irq_base);
if (ret) {
pr_err("Failed to add mpp subdevice ret=%d\n", ret);
goto bail;
}
}
if (pdata->rtc_pdata) {
rtc_cell.platform_data = pdata->rtc_pdata;
rtc_cell.pdata_size = sizeof(struct pm8xxx_rtc_platform_data);
ret = mfd_add_devices(pmic->dev, 0, &rtc_cell, 1, NULL,
irq_base);
if (ret) {
pr_err("Failed to add rtc subdevice ret=%d\n", ret);
goto bail;
}
}
if (pdata->pwrkey_pdata) {
pwrkey_cell.platform_data = pdata->pwrkey_pdata;
pwrkey_cell.pdata_size =
sizeof(struct pm8xxx_pwrkey_platform_data);
ret = mfd_add_devices(pmic->dev, 0, &pwrkey_cell, 1, NULL,
irq_base);
if (ret) {
pr_err("Failed to add pwrkey subdevice ret=%d\n", ret);
goto bail;
}
}
/* PWM cell is always registered; it takes no platform data or IRQs. */
ret = mfd_add_devices(pmic->dev, 0, &pwm_cell, 1, NULL, 0);
if (ret) {
pr_err("Failed to add pwm subdevice ret=%d\n", ret);
goto bail;
}
if (pdata->misc_pdata) {
misc_cell.platform_data = pdata->misc_pdata;
misc_cell.pdata_size = sizeof(struct pm8xxx_misc_platform_data);
ret = mfd_add_devices(pmic->dev, 0, &misc_cell, 1, NULL,
irq_base);
if (ret) {
pr_err("Failed to add misc subdevice ret=%d\n", ret);
goto bail;
}
}
if (pdata->leds_pdata) {
leds_cell.platform_data = pdata->leds_pdata;
leds_cell.pdata_size = sizeof(struct pm8xxx_led_platform_data);
ret = mfd_add_devices(pmic->dev, 0, &leds_cell, 1, NULL, 0);
if (ret) {
pr_err("Failed to add leds subdevice ret=%d\n", ret);
goto bail;
}
}
if (pdata->spk_pdata) {
spk_cell.platform_data = pdata->spk_pdata;
spk_cell.pdata_size = sizeof(struct pm8xxx_spk_platform_data);
ret = mfd_add_devices(pmic->dev, 0, &spk_cell, 1, NULL, 0);
if (ret) {
pr_err("Failed to add spk subdevice ret=%d\n", ret);
goto bail;
}
}
if (pdata->num_regulators > 0 && pdata->regulator_pdatas) {
ret = pm8038_add_regulators(pdata, pmic, irq_base);
if (ret) {
pr_err("Failed to add regulator subdevices ret=%d\n",
ret);
goto bail;
}
}
ret = mfd_add_devices(pmic->dev, 0, &debugfs_cell, 1, NULL, irq_base);
if (ret) {
pr_err("Failed to add debugfs subdevice ret=%d\n", ret);
goto bail;
}
if (pdata->adc_pdata) {
adc_cell.platform_data = pdata->adc_pdata;
adc_cell.pdata_size =
sizeof(struct pm8xxx_adc_platform_data);
ret = mfd_add_devices(pmic->dev, 0, &adc_cell, 1, NULL,
irq_base);
/* NOTE(review): unlike every other cell, an adc failure is only
 * logged and does not goto bail — confirm this is intentional. */
if (ret) {
pr_err("Failed to add adc subdevices ret=%d\n",
ret);
}
}
if (pdata->charger_pdata) {
/* Wire the charger to the ADC channels it samples. */
pdata->charger_pdata->charger_cdata.vbat_channel = CHANNEL_VBAT;
pdata->charger_pdata->charger_cdata.batt_temp_channel
= CHANNEL_BATT_THERM;
pdata->charger_pdata->charger_cdata.batt_id_channel
= CHANNEL_BATT_ID;
charger_cell.platform_data = pdata->charger_pdata;
charger_cell.pdata_size =
sizeof(struct pm8921_charger_platform_data);
ret = mfd_add_devices(pmic->dev, 0, &charger_cell, 1, NULL,
irq_base);
if (ret) {
pr_err("Failed to add charger subdevice ret=%d\n", ret);
goto bail;
}
}
if (pdata->bms_pdata) {
/* Wire the battery monitor to its ADC channels as well. */
pdata->bms_pdata->bms_cdata.batt_temp_channel
= CHANNEL_BATT_THERM;
pdata->bms_pdata->bms_cdata.vbat_channel = CHANNEL_VBAT;
pdata->bms_pdata->bms_cdata.ref625mv_channel = CHANNEL_625MV;
pdata->bms_pdata->bms_cdata.ref1p25v_channel = CHANNEL_125V;
pdata->bms_pdata->bms_cdata.batt_id_channel = CHANNEL_BATT_ID;
bms_cell.platform_data = pdata->bms_pdata;
bms_cell.pdata_size = sizeof(struct pm8921_bms_platform_data);
ret = mfd_add_devices(pmic->dev, 0, &bms_cell, 1, NULL,
irq_base);
if (ret) {
pr_err("Failed to add bms subdevice ret=%d\n", ret);
goto bail;
}
}
ret = mfd_add_devices(pmic->dev, 0, &thermal_alarm_cell, 1, NULL,
irq_base);
if (ret) {
pr_err("Failed to add thermal alarm subdevice ret=%d\n", ret);
goto bail;
}
if (pdata->ccadc_pdata) {
ccadc_cell.platform_data = pdata->ccadc_pdata;
ccadc_cell.pdata_size =
sizeof(struct pm8xxx_ccadc_platform_data);
ret = mfd_add_devices(pmic->dev, 0, &ccadc_cell, 1, NULL,
irq_base);
if (ret) {
pr_err("Failed to add ccadc subdevice ret=%d\n", ret);
goto bail;
}
}
return 0;
bail:
/* Tear down only the IRQ chip; the caller removes added mfd cells. */
if (pmic->irq_chip) {
pm8xxx_irq_exit(pmic->irq_chip);
pmic->irq_chip = NULL;
}
return ret;
}
/* Human-readable names indexed by the pm8xxx_get_revision() result. */
static const char * const pm8038_rev_names[] = {
[PM8XXX_REVISION_8038_TEST] = "test",
[PM8XXX_REVISION_8038_1p0] = "1.0",
[PM8XXX_REVISION_8038_2p0] = "2.0",
[PM8XXX_REVISION_8038_2p1] = "2.1",
};
/*
 * pm8038_probe - identify the PMIC and register all sub-devices.
 *
 * Reads both hardware-revision registers into pmic->rev_registers
 * (rev2 in the high byte), publishes the accessor vtable as driver
 * data, logs the version/revision and the restart reason, then hands
 * off to pm8038_add_subdevices().
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit pm8038_probe(struct platform_device *pdev)
{
const struct pm8038_platform_data *pdata = pdev->dev.platform_data;
const char *revision_name = "unknown";
struct pm8038 *pmic;
enum pm8xxx_version version;
int revision;
int rc;
u8 val;
if (!pdata) {
pr_err("missing platform data\n");
return -EINVAL;
}
pmic = kzalloc(sizeof(struct pm8038), GFP_KERNEL);
if (!pmic) {
pr_err("Cannot alloc pm8038 struct\n");
return -ENOMEM;
}
/* Read PMIC chip revision */
rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val));
if (rc) {
pr_err("Failed to read hw rev reg %d:rc=%d\n", REG_HWREV, rc);
goto err_read_rev;
}
pr_info("PMIC revision 1: PM8038 rev %02X\n", val);
pmic->rev_registers = val;
/* Read PMIC chip revision 2 */
rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV_2, &val, sizeof(val));
if (rc) {
pr_err("Failed to read hw rev 2 reg %d:rc=%d\n",
REG_HWREV_2, rc);
goto err_read_rev;
}
pr_info("PMIC revision 2: PM8038 rev %02X\n", val);
/* rev2 occupies the high byte of the combined revision word. */
pmic->rev_registers |= val << BITS_PER_BYTE;
pmic->dev = &pdev->dev;
/* Expose the SSBI accessor vtable to the pm8xxx child drivers. */
pm8038_drvdata.pm_chip_data = pmic;
platform_set_drvdata(pdev, &pm8038_drvdata);
/* Print out human readable version and revision names. */
version = pm8xxx_get_version(pmic->dev);
if (version == PM8XXX_VERSION_8038) {
revision = pm8xxx_get_revision(pmic->dev);
if (revision >= 0 && revision < ARRAY_SIZE(pm8038_rev_names))
revision_name = pm8038_rev_names[revision];
pr_info("PMIC version: PM8038 ver %s\n", revision_name);
} else {
WARN_ON(version != PM8XXX_VERSION_8038);
}
/* Log human readable restart reason */
rc = msm_ssbi_read(pdev->dev.parent, REG_PM8038_PON_CNTRL_3, &val, 1);
if (rc) {
pr_err("Cannot read restart reason rc=%d\n", rc);
goto err_read_rev;
}
val &= PM8XXX_RESTART_REASON_MASK;
pr_info("PMIC Restart Reason: %s\n", pm8xxx_restart_reason_str[val]);
pmic->restart_reason = val;
rc = pm8038_add_subdevices(pdata, pmic);
if (rc) {
pr_err("Cannot add subdevices rc=%d\n", rc);
goto err;
}
return 0;
err:
/* Partial registration: remove any cells that did get added. */
mfd_remove_devices(pmic->dev);
platform_set_drvdata(pdev, NULL);
kfree(pmic->mfd_regulators);
kfree(pmic->regulator_cdata);
err_read_rev:
kfree(pmic);
return rc;
}
/*
 * pm8038_remove - tear down everything pm8038_probe() created:
 * mfd cells, the IRQ chip, regulator allocations and the pm8038 struct.
 *
 * Fix: "&regulator_data[i]" had been corrupted into a mis-encoded
 * "&reg;" HTML-entity sequence that does not compile.
 */
static int __devexit pm8038_remove(struct platform_device *pdev)
{
	struct pm8xxx_drvdata *drvdata;
	struct pm8038 *pmic = NULL;
	int i;

	drvdata = platform_get_drvdata(pdev);
	if (drvdata)
		pmic = drvdata->pm_chip_data;
	if (pmic) {
		if (pmic->dev)
			mfd_remove_devices(pmic->dev);
		if (pmic->irq_chip) {
			pm8xxx_irq_exit(pmic->irq_chip);
			pmic->irq_chip = NULL;
		}
		/* pc_lock mutexes are only initialised when regulators
		 * were added (see pm8038_add_regulators()). */
		if (pmic->mfd_regulators) {
			for (i = 0; i < ARRAY_SIZE(regulator_data); i++)
				mutex_destroy(&regulator_data[i].pc_lock);
		}
		kfree(pmic->mfd_regulators);
		kfree(pmic->regulator_cdata);
		kfree(pmic);
	}
	platform_set_drvdata(pdev, NULL);
	return 0;
}
/* Platform driver glue; __devexit_p() drops the remove hook when
 * hotplug support is compiled out. */
static struct platform_driver pm8038_driver = {
.probe = pm8038_probe,
.remove = __devexit_p(pm8038_remove),
.driver = {
.name = PM8038_CORE_DEV_NAME,
.owner = THIS_MODULE,
},
};
/* Registered at postcore_initcall level, i.e. earlier than ordinary
 * module initcalls, so the PMIC core is available to dependent drivers. */
static int __init pm8038_init(void)
{
return platform_driver_register(&pm8038_driver);
}
postcore_initcall(pm8038_init);
static void __exit pm8038_exit(void)
{
platform_driver_unregister(&pm8038_driver);
}
module_exit(pm8038_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PMIC 8038 core driver");
MODULE_VERSION("1.0");
MODULE_ALIAS("platform:pm8038-core");
/* rtc-ds1343.c
*
* Driver for Dallas Semiconductor DS1343 Low Current, SPI Compatible
* Real Time Clock
*
* Author : Raghavendra Chandra Ganiga <ravi23ganiga@gmail.com>
* Ankur Srivastava <sankurece@gmail.com> : DS1343 Nvram Support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/pm.h>
#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
/* spi_device_id driver_data values distinguishing the two parts. */
#define DALLAS_MAXIM_DS1343 0
#define DALLAS_MAXIM_DS1344 1
/* RTC DS1343 Registers */
#define DS1343_SECONDS_REG 0x00
#define DS1343_MINUTES_REG 0x01
#define DS1343_HOURS_REG 0x02
#define DS1343_DAY_REG 0x03
#define DS1343_DATE_REG 0x04
#define DS1343_MONTH_REG 0x05
#define DS1343_YEAR_REG 0x06
#define DS1343_ALM0_SEC_REG 0x07
#define DS1343_ALM0_MIN_REG 0x08
#define DS1343_ALM0_HOUR_REG 0x09
#define DS1343_ALM0_DAY_REG 0x0A
#define DS1343_ALM1_SEC_REG 0x0B
#define DS1343_ALM1_MIN_REG 0x0C
#define DS1343_ALM1_HOUR_REG 0x0D
#define DS1343_ALM1_DAY_REG 0x0E
#define DS1343_CONTROL_REG 0x0F
#define DS1343_STATUS_REG 0x10
#define DS1343_TRICKLE_REG 0x11
/* On-chip battery-backed NVRAM window. */
#define DS1343_NVRAM 0x20
#define DS1343_NVRAM_LEN 96
/* DS1343 Control Registers bits */
#define DS1343_EOSC 0x80
#define DS1343_DOSF 0x20
#define DS1343_EGFIL 0x10
#define DS1343_SQW 0x08
#define DS1343_INTCN 0x04
#define DS1343_A1IE 0x02
#define DS1343_A0IE 0x01
/* DS1343 Status Registers bits */
#define DS1343_OSF 0x80
#define DS1343_IRQF1 0x02
#define DS1343_IRQF0 0x01
/* DS1343 Trickle Charger Registers bits */
#define DS1343_TRICKLE_MAGIC 0xa0
#define DS1343_TRICKLE_DS1 0x08
#define DS1343_TRICKLE_1K 0x01
#define DS1343_TRICKLE_2K 0x02
#define DS1343_TRICKLE_4K 0x03
/* SPI device-id table; driver_data selects DS1343 vs. DS1344. */
static const struct spi_device_id ds1343_id[] = {
{ "ds1343", DALLAS_MAXIM_DS1343 },
{ "ds1344", DALLAS_MAXIM_DS1344 },
{ }
};
MODULE_DEVICE_TABLE(spi, ds1343_id);
/* Per-device state. mutex serializes alarm bookkeeping and register
 * updates between the RTC ops and the IRQ thread. */
struct ds1343_priv {
struct spi_device *spi;
struct rtc_device *rtc;
struct regmap *map;
struct mutex mutex;
unsigned int irqen; /* enabled sources: RTC_AF / RTC_UF bits */
int irq;
/* Cached alarm fields; a negative value means "wildcard" and maps
 * to the register's mask bit in ds1343_update_alarm(). */
int alarm_sec;
int alarm_min;
int alarm_hour;
int alarm_mday;
};
/*
 * ds1343_ioctl - RTC character-device ioctl handler.
 *
 * Only the optional RTC_SET_CHARGE command is handled: it copies an int
 * from userspace and writes it straight into the trickle-charger
 * register.  Anything else returns -ENOIOCTLCMD so the RTC core can
 * apply its generic handling.
 *
 * Fix: the original used 'priv' in the RTC_SET_CHARGE branch without
 * ever declaring it, breaking the build whenever RTC_SET_CHARGE is
 * defined.  Fetch it from the device's driver data as every other
 * callback in this file does.
 */
static int ds1343_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
#ifdef RTC_SET_CHARGE
	struct ds1343_priv *priv = dev_get_drvdata(dev);
#endif

	switch (cmd) {
#ifdef RTC_SET_CHARGE
	case RTC_SET_CHARGE:
	{
		int val;

		if (copy_from_user(&val, (int __user *)arg, sizeof(int)))
			return -EFAULT;

		return regmap_write(priv->map, DS1343_TRICKLE_REG, val);
	}
	break;
#endif
	}

	return -ENOIOCTLCMD;
}
/* sysfs read: report whether the oscillator glitch filter is enabled. */
static ssize_t ds1343_show_glitchfilter(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ds1343_priv *priv = dev_get_drvdata(dev);
	int control;

	regmap_read(priv->map, DS1343_CONTROL_REG, &control);

	return sprintf(buf,
		       (control & DS1343_EGFIL) ? "enabled\n" : "disabled\n");
}
/* sysfs write: accept "enabled"/"disabled" and update the EGFIL bit. */
static ssize_t ds1343_store_glitchfilter(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ds1343_priv *priv = dev_get_drvdata(dev);
	int control;

	regmap_read(priv->map, DS1343_CONTROL_REG, &control);

	if (strncmp(buf, "enabled", 7) == 0)
		control |= DS1343_EGFIL;
	else if (strncmp(buf, "disabled", 8) == 0)
		control &= ~(DS1343_EGFIL);
	else
		return -EINVAL;

	regmap_write(priv->map, DS1343_CONTROL_REG, control);

	return count;
}

static DEVICE_ATTR(glitch_filter, S_IRUGO | S_IWUSR, ds1343_show_glitchfilter,
			ds1343_store_glitchfilter);
/* sysfs binary write into the battery-backed NVRAM window. */
static ssize_t ds1343_nvram_write(struct file *filp, struct kobject *kobj,
			struct bin_attribute *attr,
			char *buf, loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct ds1343_priv *priv = dev_get_drvdata(dev);
	unsigned char reg = DS1343_NVRAM + off;
	int ret;

	ret = regmap_bulk_write(priv->map, reg, buf, count);
	if (ret < 0) {
		dev_err(&priv->spi->dev, "Error in nvram write %d", ret);
		return ret;
	}

	return count;
}
/* sysfs binary read from the battery-backed NVRAM window. */
static ssize_t ds1343_nvram_read(struct file *filp, struct kobject *kobj,
			struct bin_attribute *attr,
			char *buf, loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct ds1343_priv *priv = dev_get_drvdata(dev);
	unsigned char reg = DS1343_NVRAM + off;
	int ret;

	ret = regmap_bulk_read(priv->map, reg, buf, count);
	if (ret < 0) {
		dev_err(&priv->spi->dev, "Error in nvram read %d\n", ret);
		return ret;
	}

	return count;
}
/* Binary sysfs attribute exposing the 96-byte NVRAM to userspace. */
static struct bin_attribute nvram_attr = {
.attr.name = "nvram",
.attr.mode = S_IRUGO | S_IWUSR,
.read = ds1343_nvram_read,
.write = ds1343_nvram_write,
.size = DS1343_NVRAM_LEN,
};
/* sysfs read: report whether the alarm-0 interrupt is enabled (A0IE). */
static ssize_t ds1343_show_alarmstatus(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ds1343_priv *priv = dev_get_drvdata(dev);
	int control;

	regmap_read(priv->map, DS1343_CONTROL_REG, &control);

	return sprintf(buf,
		       (control & DS1343_A0IE) ? "enabled\n" : "disabled\n");
}

static DEVICE_ATTR(alarm_status, S_IRUGO, ds1343_show_alarmstatus, NULL);
/*
 * sysfs read: decode the alarm-0 mask bits (bit 7 of each alarm
 * register) into a human-readable match-mode description.
 */
static ssize_t ds1343_show_alarmmode(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ds1343_priv *priv = dev_get_drvdata(dev);
	static const unsigned char alm_regs[] = {
		DS1343_ALM0_SEC_REG, DS1343_ALM0_MIN_REG,
		DS1343_ALM0_HOUR_REG, DS1343_ALM0_DAY_REG,
	};
	const char *desc;
	int mode = 0, data, i;

	/* Pack the four mask bits into a nibble: seconds in bit 3,
	 * minutes in bit 2, hours in bit 1, day in bit 0. */
	for (i = 0; i < 4; i++) {
		regmap_read(priv->map, alm_regs[i], &data);
		mode |= ((data & 0x80) >> 4) >> i;
	}

	switch (mode) {
	case 15:
		desc = "each second";
		break;
	case 7:
		desc = "seconds match";
		break;
	case 3:
		desc = "minutes and seconds match";
		break;
	case 1:
		desc = "hours, minutes and seconds match";
		break;
	case 0:
		desc = "day, hours, minutes and seconds match";
		break;
	default:
		desc = "invalid";
		break;
	}

	return sprintf(buf, "%s\n", desc);
}

static DEVICE_ATTR(alarm_mode, S_IRUGO, ds1343_show_alarmmode, NULL);
/*
 * sysfs read: decode the trickle-charger register into a description of
 * the diode selection and series resistor.  Anything without the magic
 * 0xA0 pattern in the top nibble is reported as "disabled".
 */
static ssize_t ds1343_show_tricklecharger(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
int data;
char *diodes = "disabled", *resistors = " ";
regmap_read(priv->map, DS1343_TRICKLE_REG, &data);
if ((data & 0xf0) == DS1343_TRICKLE_MAGIC) {
/* Bits 3:2 select the diode path. */
switch (data & 0x0c) {
case DS1343_TRICKLE_DS1:
diodes = "one diode,";
break;
default:
diodes = "no diode,";
break;
}
/* Bits 1:0 select the resistor; 0 disables charging. */
switch (data & 0x03) {
case DS1343_TRICKLE_1K:
resistors = "1k Ohm";
break;
case DS1343_TRICKLE_2K:
resistors = "2k Ohm";
break;
case DS1343_TRICKLE_4K:
resistors = "4k Ohm";
break;
default:
diodes = "disabled";
break;
}
}
return sprintf(buf, "%s %s\n", diodes, resistors);
}
static DEVICE_ATTR(trickle_charger, S_IRUGO, ds1343_show_tricklecharger, NULL);
/*
 * ds1343_sysfs_register - create the driver's sysfs attributes.
 *
 * glitch_filter, trickle_charger and the nvram binary file are always
 * created; alarm_mode and alarm_status only when an IRQ is wired up.
 * On any failure every attribute created so far is removed, in reverse
 * order, via the chained error labels.
 */
static int ds1343_sysfs_register(struct device *dev)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
int err;
err = device_create_file(dev, &dev_attr_glitch_filter);
if (err)
return err;
err = device_create_file(dev, &dev_attr_trickle_charger);
if (err)
goto error1;
err = device_create_bin_file(dev, &nvram_attr);
if (err)
goto error2;
/* No interrupt line: the alarm attributes are not exposed. */
if (priv->irq <= 0)
return err;
err = device_create_file(dev, &dev_attr_alarm_mode);
if (err)
goto error3;
err = device_create_file(dev, &dev_attr_alarm_status);
if (!err)
return err;
device_remove_file(dev, &dev_attr_alarm_mode);
error3:
device_remove_bin_file(dev, &nvram_attr);
error2:
device_remove_file(dev, &dev_attr_trickle_charger);
error1:
device_remove_file(dev, &dev_attr_glitch_filter);
return err;
}
/* Remove every sysfs attribute ds1343_sysfs_register() created. */
static void ds1343_sysfs_unregister(struct device *dev)
{
	struct ds1343_priv *priv = dev_get_drvdata(dev);

	device_remove_file(dev, &dev_attr_glitch_filter);
	device_remove_file(dev, &dev_attr_trickle_charger);
	device_remove_bin_file(dev, &nvram_attr);

	/* Alarm attributes only exist when an IRQ was wired up. */
	if (priv->irq > 0) {
		device_remove_file(dev, &dev_attr_alarm_status);
		device_remove_file(dev, &dev_attr_alarm_mode);
	}
}
/* Read the seven clock registers in one burst and convert from BCD. */
static int ds1343_read_time(struct device *dev, struct rtc_time *dt)
{
	struct ds1343_priv *priv = dev_get_drvdata(dev);
	unsigned char regs[7];
	int err;

	err = regmap_bulk_read(priv->map, DS1343_SECONDS_REG, regs, 7);
	if (err)
		return err;

	dt->tm_sec = bcd2bin(regs[0]);
	dt->tm_min = bcd2bin(regs[1]);
	dt->tm_hour = bcd2bin(regs[2] & 0x3F);	/* 24-hour bits only */
	dt->tm_wday = bcd2bin(regs[3]) - 1;	/* chip counts 1..7 */
	dt->tm_mday = bcd2bin(regs[4]);
	dt->tm_mon = bcd2bin(regs[5] & 0x1F) - 1; /* chip counts 1..12 */
	dt->tm_year = bcd2bin(regs[6]) + 100;	/* year offset from 1900 */

	return rtc_valid_tm(dt);
}
/*
 * Convert the rtc_time fields to BCD and write them register by
 * register, seconds through year, stopping at the first failure.
 */
static int ds1343_set_time(struct device *dev, struct rtc_time *dt)
{
	struct ds1343_priv *priv = dev_get_drvdata(dev);
	unsigned char val[7];
	int reg, res;

	dt->tm_year %= 100;	/* chip stores two-digit years */

	val[DS1343_SECONDS_REG] = bin2bcd(dt->tm_sec);
	val[DS1343_MINUTES_REG] = bin2bcd(dt->tm_min);
	val[DS1343_HOURS_REG] = bin2bcd(dt->tm_hour) & 0x3F;
	val[DS1343_DAY_REG] = bin2bcd(dt->tm_wday + 1);
	val[DS1343_DATE_REG] = bin2bcd(dt->tm_mday);
	val[DS1343_MONTH_REG] = bin2bcd(dt->tm_mon + 1);
	val[DS1343_YEAR_REG] = bin2bcd(dt->tm_year);

	for (reg = DS1343_SECONDS_REG; reg <= DS1343_YEAR_REG; reg++) {
		res = regmap_write(priv->map, reg, val[reg]);
		if (res)
			return res;
	}

	return 0;
}
/*
 * ds1343_update_alarm - program ALM0 from the cached alarm fields and
 * (re)enable the alarm-0 interrupt when any source is enabled.
 *
 * Callers (ds1343_set_alarm, ds1343_alarm_irq_enable) invoke this with
 * priv->mutex held.  Bit 7 of each alarm register is its mask bit; a
 * negative cached field — or an enabled update interrupt (RTC_UF) —
 * sets the mask, so RTC_UF turns ALM0 into an every-second alarm.
 *
 * Returns 0 on success or the first regmap error.
 */
static int ds1343_update_alarm(struct device *dev)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
unsigned int control, stat;
unsigned char buf[4];
int res = 0;
res = regmap_read(priv->map, DS1343_CONTROL_REG, &control);
if (res)
return res;
res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
if (res)
return res;
/* Disable the alarm and clear any pending flag before reprogramming. */
control &= ~(DS1343_A0IE);
stat &= ~(DS1343_IRQF0);
res = regmap_write(priv->map, DS1343_CONTROL_REG, control);
if (res)
return res;
res = regmap_write(priv->map, DS1343_STATUS_REG, stat);
if (res)
return res;
buf[0] = priv->alarm_sec < 0 || (priv->irqen & RTC_UF) ?
0x80 : bin2bcd(priv->alarm_sec) & 0x7F;
buf[1] = priv->alarm_min < 0 || (priv->irqen & RTC_UF) ?
0x80 : bin2bcd(priv->alarm_min) & 0x7F;
buf[2] = priv->alarm_hour < 0 || (priv->irqen & RTC_UF) ?
0x80 : bin2bcd(priv->alarm_hour) & 0x3F;
buf[3] = priv->alarm_mday < 0 || (priv->irqen & RTC_UF) ?
0x80 : bin2bcd(priv->alarm_mday) & 0x7F;
res = regmap_bulk_write(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
if (res)
return res;
/* Re-arm the interrupt only if some source is still enabled. */
if (priv->irqen) {
control |= DS1343_A0IE;
res = regmap_write(priv->map, DS1343_CONTROL_REG, control);
}
return res;
}
/*
 * Report the cached alarm time plus the live enabled/pending state.
 * Negative (wildcard) cached fields are reported as zero.
 */
static int ds1343_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
	struct ds1343_priv *priv = dev_get_drvdata(dev);
	unsigned int stat;
	int res;

	if (priv->irq <= 0)
		return -EINVAL;

	mutex_lock(&priv->mutex);

	res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
	if (res)
		goto unlock;

	alarm->enabled = !!(priv->irqen & RTC_AF);
	alarm->pending = !!(stat & DS1343_IRQF0);

	alarm->time.tm_sec = priv->alarm_sec >= 0 ? priv->alarm_sec : 0;
	alarm->time.tm_min = priv->alarm_min >= 0 ? priv->alarm_min : 0;
	alarm->time.tm_hour = priv->alarm_hour >= 0 ? priv->alarm_hour : 0;
	alarm->time.tm_mday = priv->alarm_mday >= 0 ? priv->alarm_mday : 0;

unlock:
	mutex_unlock(&priv->mutex);
	return res;
}
/*
 * Cache the requested alarm time, optionally enable the alarm source,
 * and push the result to the hardware via ds1343_update_alarm().
 */
static int ds1343_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
	struct ds1343_priv *priv = dev_get_drvdata(dev);
	int res;

	if (priv->irq <= 0)
		return -EINVAL;

	mutex_lock(&priv->mutex);

	if (alarm->enabled)
		priv->irqen |= RTC_AF;

	priv->alarm_sec = alarm->time.tm_sec;
	priv->alarm_min = alarm->time.tm_min;
	priv->alarm_hour = alarm->time.tm_hour;
	priv->alarm_mday = alarm->time.tm_mday;

	res = ds1343_update_alarm(dev);

	mutex_unlock(&priv->mutex);

	return res;
}
/*
 * Toggle the RTC_AF source in the cached enable mask and reprogram the
 * alarm hardware accordingly.
 */
static int ds1343_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	struct ds1343_priv *priv = dev_get_drvdata(dev);
	int res;

	if (priv->irq <= 0)
		return -EINVAL;

	mutex_lock(&priv->mutex);

	if (enabled)
		priv->irqen |= RTC_AF;
	else
		priv->irqen &= ~RTC_AF;

	res = ds1343_update_alarm(dev);

	mutex_unlock(&priv->mutex);

	return res;
}
/* Threaded IRQ handler: acknowledge alarm 0 and forward it to the RTC core. */
static irqreturn_t ds1343_thread(int irq, void *dev_id)
{
	struct ds1343_priv *priv = dev_id;
	unsigned int stat, control;
	int res = 0;

	mutex_lock(&priv->mutex);

	res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
	if (res)
		goto out;

	if (stat & DS1343_IRQF0) {
		/* Clear the alarm-0 interrupt flag in the status register. */
		stat &= ~DS1343_IRQF0;
		regmap_write(priv->map, DS1343_STATUS_REG, stat);

		/* Mask the alarm-0 interrupt until ds1343_update_alarm()
		 * re-arms it (it sets DS1343_A0IE when irqen is non-zero).
		 */
		res = regmap_read(priv->map, DS1343_CONTROL_REG, &control);
		if (res)
			goto out;
		control &= ~DS1343_A0IE;
		regmap_write(priv->map, DS1343_CONTROL_REG, control);

		/* Notify the RTC core that an alarm interrupt fired. */
		rtc_update_irq(priv->rtc, 1, RTC_AF | RTC_IRQF);
	}

out:
	mutex_unlock(&priv->mutex);
	/* Line is requested exclusively for this device; always handled. */
	return IRQ_HANDLED;
}
/* Callbacks exposed to the RTC class framework. */
static const struct rtc_class_ops ds1343_rtc_ops = {
	.ioctl = ds1343_ioctl,
	.read_time = ds1343_read_time,
	.set_time = ds1343_set_time,
	.read_alarm = ds1343_read_alarm,
	.set_alarm = ds1343_set_alarm,
	.alarm_irq_enable = ds1343_alarm_irq_enable,
};
/*
 * Probe: set up SPI/regmap access, sanity-check the chip, initialize the
 * control/status registers, register the RTC device and (optionally) the
 * alarm interrupt.
 */
static int ds1343_probe(struct spi_device *spi)
{
	struct ds1343_priv *priv;
	struct regmap_config config;
	unsigned int data;
	int res;

	memset(&config, 0, sizeof(config));
	config.reg_bits = 8;
	config.val_bits = 8;
	config.write_flag_mask = 0x80;	/* MSB set selects a write access */

	priv = devm_kzalloc(&spi->dev, sizeof(struct ds1343_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->spi = spi;
	mutex_init(&priv->mutex);

	/* RTC DS1347 works in spi mode 3 and
	 * its chip select is active high
	 */
	spi->mode = SPI_MODE_3 | SPI_CS_HIGH;
	spi->bits_per_word = 8;
	res = spi_setup(spi);
	if (res)
		return res;

	spi_set_drvdata(spi, priv);

	priv->map = devm_regmap_init_spi(spi, &config);
	if (IS_ERR(priv->map)) {
		dev_err(&spi->dev, "spi regmap init failed for rtc ds1343\n");
		return PTR_ERR(priv->map);
	}

	/* A failing read here means the chip is not responding. */
	res = regmap_read(priv->map, DS1343_SECONDS_REG, &data);
	if (res)
		return res;

	/* Enable the oscillator, route alarms to the INT pin and make sure
	 * no alarm interrupt is armed yet (best effort, errors ignored).
	 */
	regmap_read(priv->map, DS1343_CONTROL_REG, &data);
	data |= DS1343_INTCN;
	data &= ~(DS1343_EOSC | DS1343_A1IE | DS1343_A0IE);
	regmap_write(priv->map, DS1343_CONTROL_REG, data);

	/* Clear stale oscillator-fail and alarm interrupt flags. */
	regmap_read(priv->map, DS1343_STATUS_REG, &data);
	data &= ~(DS1343_OSF | DS1343_IRQF1 | DS1343_IRQF0);
	regmap_write(priv->map, DS1343_STATUS_REG, data);

	priv->rtc = devm_rtc_device_register(&spi->dev, "ds1343",
					&ds1343_rtc_ops, THIS_MODULE);
	if (IS_ERR(priv->rtc)) {
		dev_err(&spi->dev, "unable to register rtc ds1343\n");
		return PTR_ERR(priv->rtc);
	}

	priv->irq = spi->irq;

	/* IRQ 0 means "no interrupt": the alarm callbacks reject any
	 * priv->irq <= 0, so only request strictly positive lines (the
	 * previous ">= 0" check tried to request IRQ 0).
	 */
	if (priv->irq > 0) {
		res = devm_request_threaded_irq(&spi->dev, spi->irq, NULL,
						ds1343_thread, IRQF_ONESHOT,
						"ds1343", priv);
		if (res) {
			/* Mark the IRQ unusable but keep the RTC working. */
			priv->irq = -1;
			dev_err(&spi->dev,
				"unable to request irq for rtc ds1343\n");
		} else {
			device_init_wakeup(&spi->dev, true);
			dev_pm_set_wake_irq(&spi->dev, spi->irq);
		}
	}

	res = ds1343_sysfs_register(&spi->dev);
	if (res)
		dev_err(&spi->dev,
			"unable to create sysfs entries for rtc ds1343\n");

	return 0;
}
/* Remove: disarm the alarm, release the wake IRQ and sysfs entries. */
static int ds1343_remove(struct spi_device *spi)
{
	struct ds1343_priv *priv = spi_get_drvdata(spi);

	/* Only tear down the IRQ machinery when the interrupt was actually
	 * requested in probe: priv->irq is -1 when the request failed and
	 * <= 0 when no valid line was provided. The previous check on
	 * spi->irq would call devm_free_irq() on an IRQ that was never
	 * requested.
	 */
	if (priv->irq > 0) {
		mutex_lock(&priv->mutex);
		priv->irqen &= ~RTC_AF;
		mutex_unlock(&priv->mutex);

		dev_pm_clear_wake_irq(&spi->dev);
		device_init_wakeup(&spi->dev, false);
		devm_free_irq(&spi->dev, spi->irq, priv);
	}

	spi_set_drvdata(spi, NULL);
	ds1343_sysfs_unregister(&spi->dev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/* Keep the alarm IRQ able to wake the system while suspended. */
static int ds1343_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	if (spi->irq >= 0 && device_may_wakeup(dev))
		enable_irq_wake(spi->irq);

	return 0;
}

/* Undo ds1343_suspend(): stop the IRQ from acting as a wakeup source. */
static int ds1343_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	if (spi->irq >= 0 && device_may_wakeup(dev))
		disable_irq_wake(spi->irq);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(ds1343_pm, ds1343_suspend, ds1343_resume);

/* SPI driver glue; ds1343_id is defined earlier in this file. */
static struct spi_driver ds1343_driver = {
	.driver = {
		.name = "ds1343",
		.pm = &ds1343_pm,
	},
	.probe = ds1343_probe,
	.remove = ds1343_remove,
	.id_table = ds1343_id,
};

module_spi_driver(ds1343_driver);

MODULE_DESCRIPTION("DS1343 RTC SPI Driver");
MODULE_AUTHOR("Raghavendra Chandra Ganiga <ravi23ganiga@gmail.com>,"
		"Ankur Srivastava <sankurece@gmail.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
TeamEpsilon/linux-3.8 | drivers/devfreq/devfreq.c | 578 | 28289 | /*
* devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
* for Non-CPU Devices.
*
* Copyright (C) 2011 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include "governor.h"
static struct class *devfreq_class;
/*
* devfreq core provides delayed work based load monitoring helper
* functions. Governors can use these or can implement their own
* monitoring mechanism.
*/
static struct workqueue_struct *devfreq_wq;
/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);
/**
* find_device_devfreq() - find devfreq struct using device pointer
* @dev: device pointer used to lookup device devfreq.
*
* Search the list of device devfreqs and return the matched device's
* devfreq info. devfreq_list_lock should be held by the caller.
*/
static struct devfreq *find_device_devfreq(struct device *dev)
{
struct devfreq *tmp_devfreq;
if (unlikely(IS_ERR_OR_NULL(dev))) {
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
WARN(!mutex_is_locked(&devfreq_list_lock),
"devfreq_list_lock must be locked.");
list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
if (tmp_devfreq->dev.parent == dev)
return tmp_devfreq;
}
return ERR_PTR(-ENODEV);
}
/**
* devfreq_get_freq_level() - Lookup freq_table for the frequency
* @devfreq: the devfreq instance
* @freq: the target frequency
*/
/* Map @freq to its index in the profile's freq_table, or -EINVAL. */
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev = 0;

	/* Linear scan: the table holds only max_state entries. */
	while (lev < devfreq->profile->max_state) {
		if (devfreq->profile->freq_table[lev] == freq)
			return lev;
		lev++;
	}

	return -EINVAL;
}
/**
* devfreq_update_status() - Update statistics of devfreq behavior
* @devfreq: the devfreq instance
* @freq: the update target frequency
*/
/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq:	the devfreq instance
 * @freq:	the update target frequency
 *
 * Folds the elapsed time into time_in_state[] and, on a frequency change,
 * bumps the corresponding trans_table[] cell. Returns 0 or a negative
 * errno when a frequency cannot be mapped to a table level.
 */
static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev;
	unsigned long cur_time;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0)
		return lev;

	cur_time = jiffies;
	devfreq->time_in_state[lev] +=
			 cur_time - devfreq->last_stat_updated;

	if (freq != devfreq->previous_freq) {
		prev_lev = devfreq_get_freq_level(devfreq,
						devfreq->previous_freq);
		/* previous_freq may not be in freq_table (e.g. the initial
		 * frequency): a negative level would index trans_table out
		 * of bounds, so bail out instead of corrupting memory.
		 */
		if (prev_lev < 0)
			return prev_lev;
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}
	devfreq->last_stat_updated = cur_time;

	return 0;
}
/**
* find_devfreq_governor() - find devfreq governor from name
* @name: name of the governor
*
* Search the list of devfreq governors and return the matched
* governor's pointer. devfreq_list_lock should be held by the caller.
*/
static struct devfreq_governor *find_devfreq_governor(const char *name)
{
struct devfreq_governor *tmp_governor;
if (unlikely(IS_ERR_OR_NULL(name))) {
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
WARN(!mutex_is_locked(&devfreq_list_lock),
"devfreq_list_lock must be locked.");
list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
return tmp_governor;
}
return ERR_PTR(-ENODEV);
}
/* Load monitoring helper functions for governors use */
/**
* update_devfreq() - Reevaluate the device and configure frequency.
* @devfreq: the devfreq instance.
*
* Note: Lock devfreq->lock before calling update_devfreq
* This function is exported for governors.
*/
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq and QoS.
	 *
	 * List from the highest priority
	 * max_freq (probably called by thermal when it's too hot)
	 * min_freq
	 */
	if (devfreq->min_freq && freq < devfreq->min_freq) {
		freq = devfreq->min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (devfreq->max_freq && freq > devfreq->max_freq) {
		freq = devfreq->max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err)
		return err;

	/* Fold statistics for the outgoing frequency before recording the
	 * new one (devfreq_update_status() reads previous_freq).
	 */
	if (devfreq->profile->freq_table)
		if (devfreq_update_status(devfreq, freq))
			dev_err(&devfreq->dev,
				"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = freq;
	return err;
}
EXPORT_SYMBOL(update_devfreq);
/**
* devfreq_monitor() - Periodically poll devfreq objects.
* @work: the work struct used to run devfreq_monitor periodically.
*
*/
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	/* Re-arm while still holding the lock so polling_ms is read
	 * consistently with concurrent interval updates.
	 */
	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);
}
/**
* devfreq_monitor_start() - Start load monitoring of devfreq instance
* @devfreq: the devfreq instance.
*
* Helper function for starting devfreq device load monitoing. By
* default delayed work based monitoring is supported. Function
* to be called from governor in response to DEVFREQ_GOV_START
* event when device is added to devfreq framework.
*/
/* Initialize the deferrable work and kick off polling when requested. */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	unsigned int delay_ms = devfreq->profile->polling_ms;

	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);

	/* A zero interval means the governor does not want periodic polls. */
	if (delay_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
				   msecs_to_jiffies(delay_ms));
}
EXPORT_SYMBOL(devfreq_monitor_start);
/**
* devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
* @devfreq: the devfreq instance.
*
* Helper function to stop devfreq device load monitoing. Function
* to be called from governor in response to DEVFREQ_GOV_STOP
* event when device is removed from devfreq framework.
*/
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	/* Synchronous cancel: no monitor work runs after this returns. */
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);
/**
* devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
* @devfreq: the devfreq instance.
*
* Helper function to suspend devfreq device load monitoing. Function
* to be called from governor in response to DEVFREQ_GOV_SUSPEND
* event or when polling interval is set to zero.
*
* Note: Though this function is same as devfreq_monitor_stop(),
* intentionally kept separate to provide hooks for collecting
* transition statistics.
*/
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		/* Already suspended; nothing to do. */
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);

	/* Cancel outside the lock: devfreq_monitor() takes devfreq->lock,
	 * so a synchronous cancel under the lock would deadlock.
	 */
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);
/**
* devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
* @devfreq: the devfreq instance.
*
* Helper function to resume devfreq device load monitoing. Function
* to be called from governor in response to DEVFREQ_GOV_RESUME
* event or when polling interval is set to non-zero.
*/
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	/* Only queue when polling is wanted and nothing is pending already. */
	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
	devfreq->stop_polling = false;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);
/**
* devfreq_interval_update() - Update device devfreq monitoring interval
* @devfreq: the devfreq instance.
* @delay: new polling interval to be set.
*
* Helper function to set new load monitoring polling interval. Function
* to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
*/
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	/* While suspended the new interval simply takes effect on resume. */
	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		/* Cancel outside the lock: the work takes devfreq->lock. */
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		/* Re-check: we may have been suspended while unlocked. */
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
			      msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);
/**
* devfreq_notifier_call() - Notify that the device frequency requirements
* has been changed out of devfreq framework.
* @nb: the notifier_block (supposed to be devfreq->nb)
* @type: not used
* @devp: not used
*
* Called by a notifier that uses devfreq->nb.
*/
/* Notifier hook: re-evaluate the target frequency under the device lock. */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int err;

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return err;
}
/**
* _remove_devfreq() - Remove devfreq from the list and release its resources.
* @devfreq: the devfreq struct
* @skip: skip calling device_unregister().
*/
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	/* Stop the governor before tearing down driver state. */
	if (devfreq->governor)
		devfreq->governor->event_handler(devfreq,
						 DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	/* @skip is true when invoked from devfreq_dev_release(): the device
	 * is already going away, so don't unregister it a second time.
	 */
	if (!skip && get_device(&devfreq->dev)) {
		device_unregister(&devfreq->dev);
		put_device(&devfreq->dev);
	}

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}
/**
* devfreq_dev_release() - Callback for struct device to release the device.
* @dev: the devfreq device
*
* This calls _remove_devfreq() if _remove_devfreq() is not called.
* Note that devfreq_dev_release() could be called by _remove_devfreq() as
* well as by others unregistering the device.
*/
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	/* skip = true: device_unregister() already ran for this device. */
	_remove_devfreq(devfreq, true);
}
/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor_name:	name of the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 *
 * Returns the new devfreq instance, or an ERR_PTR() on failure.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* Each device may carry at most one devfreq instance. */
	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		dev_err(dev, "%s: Unable to create devfreq for the device\n",
			__func__);
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	/* Statistics buffers, devm-managed by the parent device. */
	devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
						devfreq->profile->max_state *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->last_stat_updated = jiffies;

	/* Use an explicit "%s" format: passing dev_name(dev) directly as
	 * the format string misbehaves on device names containing '%'.
	 */
	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		put_device(&devfreq->dev);
		mutex_unlock(&devfreq->lock);
		goto err_dev;
	}

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);
	list_add(&devfreq->node, &devfreq_list);

	/* The requested governor may not be loaded yet; that is fine —
	 * devfreq_add_governor() attaches it by name later.
	 */
	governor = find_devfreq_governor(devfreq->governor_name);
	if (!IS_ERR(governor))
		devfreq->governor = governor;
	if (devfreq->governor)
		err = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_START, NULL);
	mutex_unlock(&devfreq_list_lock);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}

	return devfreq;

err_init:
	list_del(&devfreq->node);
	device_unregister(&devfreq->dev);
err_dev:
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);
/**
* devfreq_remove_device() - Remove devfreq feature from a device.
* @devfreq: the devfreq instance to be removed
*/
/* Tear down a devfreq instance, including its class device. */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	/* skip = false: also unregister the class device. */
	_remove_devfreq(devfreq, false);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);
/**
* devfreq_suspend_device() - Suspend devfreq of a device.
* @devfreq: the devfreq instance to be suspended
*/
/* Forward a suspend request to the governor, if one is attached. */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	/* No governor bound: nothing to suspend. */
	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);
/**
* devfreq_resume_device() - Resume devfreq of a device.
* @devfreq: the devfreq instance to be resumed
*/
/* Forward a resume request to the governor, if one is attached. */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	/* No governor bound: nothing to resume. */
	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);
/**
* devfreq_add_governor() - Add devfreq governor
* @governor: the devfreq governor to be added
*/
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	/* Attach the new governor to every devfreq device that requested it
	 * by name before the governor became available.
	 */
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);
/**
 * devfreq_remove_governor() - Remove devfreq governor
 * @governor:	the devfreq governor to be removed
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}

	/* Detach the governor from every device that is using it. */
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);
static ssize_t show_governor(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (!to_devfreq(dev)->governor)
return -EINVAL;
return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}
/* sysfs: switch the device to the governor named in @buf. */
static ssize_t store_governor(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	struct devfreq_governor *governor;

	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&devfreq_list_lock);
	governor = find_devfreq_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	/* Switching to the governor already in use is a no-op. */
	if (df->governor == governor)
		goto out;

	/* Stop the old governor before handing the device to the new one;
	 * if the stop fails, keep the old governor attached.
	 */
	if (df->governor) {
		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
		if (ret) {
			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
				 __func__, df->governor->name, ret);
			goto out;
		}
	}
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret)
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
out:
	mutex_unlock(&devfreq_list_lock);
	/* ret == 0 means success: report the whole buffer consumed. */
	if (!ret)
		ret = count;
	return ret;
}
/* sysfs: print a space-separated list of all registered governors. */
static ssize_t show_available_governors(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq_governor *gov;
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(gov, &devfreq_governor_list, node)
		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%s ", gov->name);
	mutex_unlock(&devfreq_list_lock);

	/* Replace the trailing space (if any) with a newline. */
	if (count)
		count--;
	count += sprintf(&buf[count], "\n");

	return count;
}
/* sysfs: print the current frequency (live readout when available). */
static ssize_t show_freq(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	unsigned long freq;

	/* Prefer the driver's own frequency readout if it provides one. */
	if (devfreq->profile->get_cur_freq) {
		if (!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
			return sprintf(buf, "%lu\n", freq);
	}

	/* Fall back to the last frequency devfreq set. */
	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
static ssize_t show_target_freq(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
static ssize_t show_polling_interval(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}
/* sysfs: hand a new polling interval to the governor. */
static ssize_t store_polling_interval(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;

	if (!df->governor)
		return -EINVAL;

	if (sscanf(buf, "%u", &value) != 1)
		return -EINVAL;

	/* The governor decides how the new interval is applied. */
	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);

	return count;
}
/* sysfs: set the user-requested minimum frequency (0 disables the limit). */
static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long max;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	max = df->max_freq;
	/* When both limits are active, min must not exceed max. */
	if (value && max && value > max) {
		ret = -EINVAL;
		goto unlock;
	}

	df->min_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}
static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
}
/* sysfs: set the user-requested maximum frequency (0 disables the limit). */
static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long min;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	min = df->min_freq;
	/* When both limits are active, max must not fall below min. */
	if (value && min && value < min) {
		ret = -EINVAL;
		goto unlock;
	}

	df->max_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}
static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
}
/* sysfs: list all available OPP frequencies in ascending order. */
static ssize_t show_available_freqs(struct device *d,
				    struct device_attribute *attr,
				    char *buf)
{
	struct devfreq *df = to_devfreq(d);
	struct device *dev = df->dev.parent;
	struct opp *opp;
	ssize_t count = 0;
	unsigned long freq = 0;

	/* opp_find_freq_ceil() returns the lowest OPP >= freq and updates
	 * freq in place; freq++ then steps past it to find the next one.
	 * The OPP API requires rcu_read_lock() around these lookups.
	 */
	rcu_read_lock();
	do {
		opp = opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%lu ", freq);
		freq++;
	} while (1);
	rcu_read_unlock();

	/* Truncate the trailing space */
	if (count)
		count--;
	count += sprintf(&buf[count], "\n");

	return count;
}
/* sysfs: render the frequency transition matrix plus per-state residency. */
static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j, err;
	unsigned int max_state = devfreq->profile->max_state;

	/* Fold the elapsed time into the statistics before printing. */
	err = devfreq_update_status(devfreq, devfreq->previous_freq);
	if (err)
		return 0;

	len = sprintf(buf, " From : To\n");
	len += sprintf(buf + len, " :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%8u",
				devfreq->profile->freq_table[i]);
	len += sprintf(buf + len, " time(ms)\n");

	for (i = 0; i < max_state; i++) {
		/* Mark the row of the currently active frequency with '*'. */
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%8u:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%8u",
				devfreq->trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10u\n",
			jiffies_to_msecs(devfreq->time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
			devfreq->total_trans);
	return len;
}
/* Default sysfs attributes attached to every devfreq device via the class. */
static struct device_attribute devfreq_attrs[] = {
	__ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor),
	__ATTR(available_governors, S_IRUGO, show_available_governors, NULL),
	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
	__ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
	__ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
	       store_polling_interval),
	__ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
	__ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
	__ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
	{ },	/* sentinel */
};
/* Create the devfreq class and the shared freezable workqueue. */
static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	/* create_freezable_workqueue() returns NULL on failure, never an
	 * ERR_PTR value, so the old IS_ERR() check could never detect it.
	 */
	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_attrs = devfreq_attrs;

	return 0;
}
subsys_initcall(devfreq_init);
/* Module teardown: destroy the class and the shared workqueue. */
static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
	destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);
/*
* The followings are helper functions for devfreq user device drivers with
* OPP framework.
*/
/**
* devfreq_recommended_opp() - Helper function to get proper OPP for the
* freq value given to target callback.
* @dev: The devfreq user device. (parent of devfreq)
* @freq: The frequency given to target function
* @flags: Flags handed from devfreq framework.
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. The reason for the same is that the opp pointer which is
* returned will remain valid for use with opp_get_{voltage, freq} only while
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
/* Pick the OPP matching @freq, honoring the LUB/GLB bound in @flags.
 * Must be called under rcu_read_lock(); see the kernel-doc above.
 */
struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
				    u32 flags)
{
	struct opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* freq is an upper bound: prefer the highest OPP below it,
		 * falling back to the closest one above when none exists.
		 */
		opp = opp_find_freq_floor(dev, freq);
		if (opp == ERR_PTR(-ERANGE))
			opp = opp_find_freq_ceil(dev, freq);
		return opp;
	}

	/* freq is a lower bound: prefer the lowest OPP above it, falling
	 * back to the closest one below when none exists.
	 */
	opp = opp_find_freq_ceil(dev, freq);
	if (opp == ERR_PTR(-ERANGE))
		opp = opp_find_freq_floor(dev, freq);

	return opp;
}
/**
* devfreq_register_opp_notifier() - Helper function to get devfreq notified
* for any changes in the OPP availability
* changes
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*/
/* Subscribe @devfreq to OPP availability changes of its parent device. */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;

	/* opp_get_notifier() must run under rcu_read_lock(). */
	rcu_read_lock();
	nh = opp_get_notifier(dev);
	rcu_read_unlock();
	if (IS_ERR(nh))
		return PTR_ERR(nh);

	return srcu_notifier_chain_register(nh, &devfreq->nb);
}
/**
* devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
* notified for any changes in the OPP
* availability changes anymore.
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*
* At exit() callback of devfreq_dev_profile, this must be included if
* devfreq_recommended_opp is used.
*/
/* Unsubscribe @devfreq from OPP availability changes of its parent device. */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;

	/* opp_get_notifier() must run under rcu_read_lock(). */
	rcu_read_lock();
	nh = opp_get_notifier(dev);
	rcu_read_unlock();
	if (IS_ERR(nh))
		return PTR_ERR(nh);

	return srcu_notifier_chain_unregister(nh, &devfreq->nb);
}
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");
| gpl-2.0 |
cwyy/kernel | drivers/infiniband/hw/ipath/ipath_verbs.c | 578 | 63358 | /*
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"
/*
 * Module parameters controlling resource limits for the ipath verbs layer.
 * Parameters marked S_IWUSR may be changed at runtime via sysfs; values
 * are read by the allocation paths, so raising a limit takes effect for
 * subsequent allocations only.
 */

/* Number of buckets in the QP hash table (fixed at module load). */
static unsigned int ib_ipath_qp_table_size = 251;
module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

/* log2 of the LKEY table size; non-static, shared with the MR code. */
unsigned int ib_ipath_lkey_table_size = 12;
module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_ipath_max_pds = 0xFFFF;
module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_ipath_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_ipath_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_ipath_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_ipath_max_qps = 16384;
module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_ipath_max_sges = 0x60;
module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_ipath_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int ib_ipath_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
		   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_ipath_max_srqs = 1024;
module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_ipath_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
		   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
		   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support");

/* When non-zero, SMI/GSI packets are dropped in ipath_qp_rcv(). */
static unsigned int ib_ipath_disable_sma;
module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; ipath_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
/*
 * Per-QP-state capability bitmask, indexed by enum ib_qp_state.
 * The send/receive paths test these flags to decide whether posting,
 * processing, or flushing is legal in the QP's current state.
 */
const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = IPATH_POST_RECV_OK,
	[IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
	[IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
	    IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK |
	    IPATH_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
	    IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
	[IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
	    IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
	[IB_QPS_ERR] = IPATH_POST_RECV_OK | IPATH_FLUSH_RECV |
	    IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
};
/* Driver-private wrapper around the core ib_ucontext. */
struct ipath_ucontext {
	struct ib_ucontext ibucontext;
};

/* Convert a core ib_ucontext pointer back to the enclosing wrapper. */
static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
						  *ibucontext)
{
	return container_of(ibucontext, struct ipath_ucontext, ibucontext);
}
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 * Used when generating completions for posted send work requests.
 */
const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};
/*
 * System image GUID.
 * Big-endian; presumably set once during device initialization
 * (assignment not visible in this chunk).
 */
static __be64 sys_image_guid;
/**
 * ipath_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 *
 * Copies @length bytes from @data into the scatter/gather list,
 * advancing the SGE state in place as segments and SGEs are consumed.
 */
void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	while (length) {
		/* Copy at most what remains of the current segment/SGE. */
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			/* SGE exhausted: move to the next one, if any. */
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			/* Segment exhausted: step to the next MR segment. */
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}
/**
 * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 *
 * Advances the SGE state by @length bytes without copying any data;
 * otherwise identical to the walk in ipath_copy_sge().
 */
void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	while (length) {
		/* Skip at most what remains of the current segment/SGE. */
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			/* SGE exhausted: move to the next one, if any. */
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			/* Segment exhausted: step to the next MR segment. */
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}
/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the ipath_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 *
 * Works on local copies of the SGE state so the caller's state is
 * left untouched; the walk itself mirrors ipath_copy_sge().
 */
static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sg_list = ss->sg_list;
	struct ipath_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 ndesc = 1;	/* count the header */

	while (length) {
		u32 len = sge.length;

		if (len > length)
			len = length;
		if (len > sge.sge_length)
			len = sge.sge_length;
		BUG_ON(len == 0);
		/*
		 * Every piece must be 32-bit aligned and, except for the
		 * final one, a multiple of 4 bytes long; otherwise DMA
		 * cannot be used and we signal that with a zero count.
		 */
		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
		    (len != length && (len & (sizeof(u32) - 1)))) {
			ndesc = 0;
			break;
		}
		ndesc++;
		sge.vaddr += len;
		sge.length -= len;
		sge.sge_length -= len;
		if (sge.sge_length == 0) {
			if (--num_sge)
				sge = *sg_list++;
		} else if (sge.length == 0 && sge.mr != NULL) {
			if (++sge.n >= IPATH_SEGSZ) {
				if (++sge.m >= sge.mr->mapsz)
					break;
				sge.n = 0;
			}
			sge.vaddr =
				sge.mr->map[sge.m]->segs[sge.n].vaddr;
			sge.length =
				sge.mr->map[sge.m]->segs[sge.n].length;
		}
		length -= len;
	}
	return ndesc;
}
/*
 * Copy from the SGEs to the data buffer.
 * Mirror image of ipath_copy_sge(): gathers @length bytes out of the
 * scatter/gather list into the flat buffer @data, advancing the SGE
 * state in place.
 */
static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
				u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	while (length) {
		/* Copy at most what remains of the current segment/SGE. */
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(data, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			/* SGE exhausted: move to the next one, if any. */
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			/* Segment exhausted: step to the next MR segment. */
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}
/**
 * ipath_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 *
 * Validates the work request against the QP type and state, translates
 * the lkeys, and appends a software WQE to the send queue.  The whole
 * operation is performed under qp->s_lock with interrupts disabled.
 *
 * Returns 0 on success, -ENETDOWN if the link is not active (non-SMI),
 * -ENOMEM if the send queue is full, or -EINVAL for an invalid request.
 */
static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	int ret;
	unsigned long flags;
	struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* SMI packets may be posted even when the link is down. */
	if (qp->ibqp.qp_type != IB_QPT_SMI &&
	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		ret = -ENETDOWN;
		goto bail;
	}

	/* Check that state is OK to post send. */
	if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))
		goto bail_inval;

	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge)
		goto bail_inval;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
			goto bail_inval;
	} else if (qp->ibqp.qp_type == IB_QPT_UD) {
		/* Check UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			goto bail_inval;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != wr->wr.ud.ah->pd)
			goto bail_inval;
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
		goto bail_inval;
	else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1)))
		goto bail_inval;
	else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
		goto bail_inval;

	/* Check for room on the (circular) send queue. */
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		ret = -ENOMEM;
		goto bail;
	}

	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->length = 0;
	if (wr->num_sge) {
		/* RDMA reads need write access to deposit the response. */
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0, j = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			/* Zero-length SGEs are silently dropped. */
			if (length == 0)
				continue;
			ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
					   &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	/* Enforce the 2 GB message limit (UC/RC) or the IB MTU (UD). */
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval;
	} else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
		goto bail_inval;
	wqe->ssn = qp->s_ssn++;
	/* Publish the WQE by advancing the head index. */
	qp->s_head = next;
	ret = 0;
	goto bail;

bail_inval:
	ret = -EINVAL;
bail:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
/**
* ipath_post_send - post a send on a QP
* @ibqp: the QP to post the send on
* @wr: the list of work requests to post
* @bad_wr: the first bad WR is put here
*
* This may be called from interrupt context.
*/
static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
struct ipath_qp *qp = to_iqp(ibqp);
int err = 0;
for (; wr; wr = wr->next) {
err = ipath_post_one_send(qp, wr);
if (err) {
*bad_wr = wr;
goto bail;
}
}
/* Try to do the send work in the caller's context. */
ipath_do_send((unsigned long) qp);
bail:
return err;
}
/**
 * ipath_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * Appends each receive WR on the chain to the (circular) receive queue.
 * On failure the offending WR is reported through @bad_wr.
 * This may be called from interrupt context.
 */
static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int ret;

	/* Check that state is OK to post receive. */
	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		ret = -EINVAL;
		goto bail;
	}

	for (; wr; wr = wr->next) {
		struct ipath_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		/* The queue is locked per-WR so each post is atomic. */
		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			/* Receive queue full. */
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}
/**
 * ipath_qp_rcv - processing an incoming packet on a QP
 * @dev: the device the packet came on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * Dispatches the packet to the transport-specific receive routine for
 * the QP's type.  Packets for QPs whose state cannot process receives
 * are counted as drops.  SMI/GSI traffic is dropped when the SMA has
 * been disabled via the disable_sma module parameter.
 *
 * This is called from ipath_ib_rcv() at interrupt level.
 */
static void ipath_qp_rcv(struct ipath_ibdev *dev,
			 struct ipath_ib_header *hdr, int has_grh,
			 void *data, u32 tlen, struct ipath_qp *qp)
{
	/* Check for valid receive state. */
	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
		dev->n_pkt_drops++;
		return;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		/* Management QPs are handled like UD unless disabled. */
		if (!ib_ipath_disable_sma)
			ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
		break;
	case IB_QPT_UD:
		ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
		break;
	case IB_QPT_RC:
		ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
		break;
	case IB_QPT_UC:
		ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
		break;
	default:
		break;
	}
}
/**
 * ipath_ib_rcv - process an incoming packet
 * @arg: the device pointer
 * @rhdr: the header of the packet
 * @data: the packet data
 * @tlen: the packet length
 *
 * This is called from ipath_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 *
 * Validates the LRH, locates the destination QP (unicast or multicast)
 * and hands the packet to ipath_qp_rcv().  Lookup functions take a
 * reference on the QP/mcast group; the reference is dropped here and
 * any waiter in the destroy path is woken when it hits zero.
 */
void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
		  u32 tlen)
{
	struct ipath_ib_header *hdr = rhdr;
	struct ipath_other_headers *ohdr;
	struct ipath_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	if (unlikely(dev == NULL))
		goto bail;

	if (unlikely(tlen < 24)) {	/* LRH+BTH+CRC */
		dev->rcv_errors++;
		goto bail;
	}

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < IPATH_MULTICAST_LID_BASE) {
		/* Mask off the LMC bits before comparing to our LID. */
		lid &= ~((1 << dev->dd->ipath_lmc) - 1);
		if (unlikely(lid != dev->dd->ipath_lid)) {
			dev->rcv_errors++;
			goto bail;
		}
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == IPATH_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == IPATH_LRH_GRH)
		ohdr = &hdr->u.l.oth;
	else {
		dev->rcv_errors++;
		goto bail;
	}

	/* Per-opcode receive statistics. */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	dev->opstats[opcode].n_bytes += tlen;
	dev->opstats[opcode].n_packets++;

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
	if (qp_num == IPATH_MULTICAST_QPN) {
		struct ipath_mcast *mcast;
		struct ipath_mcast_qp *p;

		/* Multicast packets must carry a GRH. */
		if (lnh != IPATH_LRH_GRH) {
			dev->n_pkt_drops++;
			goto bail;
		}
		mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
		if (mcast == NULL) {
			dev->n_pkt_drops++;
			goto bail;
		}
		dev->n_multicast_rcv++;
		/* Deliver a copy to every QP attached to the group. */
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify ipath_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
		if (qp) {
			dev->n_unicast_rcv++;
			ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
				     tlen, qp);
			/*
			 * Notify ipath_destroy_qp() if it is waiting
			 * for us to finish.
			 */
			if (atomic_dec_and_test(&qp->refcount))
				wake_up(&qp->wait);
		} else
			dev->n_pkt_drops++;
	}

bail:;
}
/**
 * ipath_ib_timer - verbs timer
 * @arg: the device pointer
 *
 * This is called from ipath_do_rcv_timer() at interrupt level to check for
 * QPs which need retransmits and to collect performance numbers.
 *
 * Timed-out and RNR-expired QPs are moved off the device lists onto two
 * local singly-linked lists (via qp->timer_next) while pending_lock is
 * held, then processed after the lock is dropped so qp->s_lock can be
 * taken safely.  Each QP collected holds an extra reference that is
 * dropped at the end.
 */
static void ipath_ib_timer(struct ipath_ibdev *dev)
{
	struct ipath_qp *resend = NULL;
	struct ipath_qp *rnr = NULL;
	struct list_head *last;
	struct ipath_qp *qp;
	unsigned long flags;

	if (dev == NULL)
		return;

	spin_lock_irqsave(&dev->pending_lock, flags);
	/* Start filling the next pending queue. */
	if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
		dev->pending_index = 0;
	/* Save any requests still in the new queue, they have timed out. */
	last = &dev->pending[dev->pending_index];
	while (!list_empty(last)) {
		qp = list_entry(last->next, struct ipath_qp, timerwait);
		list_del_init(&qp->timerwait);
		qp->timer_next = resend;
		resend = qp;
		atomic_inc(&qp->refcount);
	}
	/* Tick the RNR wait list; collect QPs whose timeout has expired. */
	last = &dev->rnrwait;
	if (!list_empty(last)) {
		qp = list_entry(last->next, struct ipath_qp, timerwait);
		if (--qp->s_rnr_timeout == 0) {
			do {
				list_del_init(&qp->timerwait);
				qp->timer_next = rnr;
				rnr = qp;
				atomic_inc(&qp->refcount);
				if (list_empty(last))
					break;
				qp = list_entry(last->next, struct ipath_qp,
						timerwait);
			} while (qp->s_rnr_timeout == 0);
		}
	}
	/*
	 * We should only be in the started state if pma_sample_start != 0
	 */
	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
	    --dev->pma_sample_start == 0) {
		/* Sampling window opens: snapshot the starting counters. */
		dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
		ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
					&dev->ipath_rword,
					&dev->ipath_spkts,
					&dev->ipath_rpkts,
					&dev->ipath_xmit_wait);
	}
	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
		if (dev->pma_sample_interval == 0) {
			/* Window closes: store the deltas over the window. */
			u64 ta, tb, tc, td, te;

			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
			ipath_snapshot_counters(dev->dd, &ta, &tb,
						&tc, &td, &te);

			dev->ipath_sword = ta - dev->ipath_sword;
			dev->ipath_rword = tb - dev->ipath_rword;
			dev->ipath_spkts = tc - dev->ipath_spkts;
			dev->ipath_rpkts = td - dev->ipath_rpkts;
			dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
		}
		else
			dev->pma_sample_interval--;
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/* XXX What if timer fires again while this is running? */
	while (resend != NULL) {
		qp = resend;
		resend = qp->timer_next;

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_last != qp->s_tail &&
		    ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
			dev->n_timeouts++;
			ipath_restart_rc(qp, qp->s_last_psn + 1);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify ipath_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	while (rnr != NULL) {
		qp = rnr;
		rnr = qp->timer_next;

		spin_lock_irqsave(&qp->s_lock, flags);
		if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify ipath_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
/*
 * Advance the SGE state by @length bytes, which the caller guarantees
 * fit within the current segment.  Moves to the next SGE or MR segment
 * when the current one is exhausted (same stepping as ipath_copy_sge()).
 */
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}
/*
 * Helpers for packing partial 32-bit words in copy_io().  The hardware
 * PIO buffer expects little-endian dword order, so the shifts invert
 * between host endiannesses:
 *
 *   get_upper_bits(data, shift)  - extract the bytes of @data above
 *                                  bit position @shift (host order)
 *   set_upper_bits(data, shift)  - position @data into the upper bytes
 *   clear_upper_bytes(d, n, off) - keep the low @n bytes of @d, placed
 *                                  @off bytes into the word, zeroing
 *                                  the rest
 */
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	/* Shift left then right to zero everything but the low n bytes,
	 * repositioned off bytes up. */
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif
/*
 * Copy @length bytes from the scatter/gather list @ss into the PIO
 * buffer @piobuf as 32-bit words.  Handles misaligned source addresses
 * and byte counts that are not a multiple of 4 by accumulating partial
 * words in @data/@extra.  The final word is written separately as the
 * hardware "trigger word", with write-combining flushes when @flush_wc
 * is set.
 */
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;	/* number of carried-over bytes in 'data' */
	u32 data = 0;	/* partially assembled output word */
	u32 last;	/* final (trigger) word, written after the loop */

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		BUG_ON(len == 0);
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			/* Read the aligned word containing vaddr and keep
			 * only the bytes at and above the offset. */
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				/* Enough to complete a full output word. */
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			/* Stream whole words, carrying 'extra' bytes of
			 * each word into the next output word. */
			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l,
								  extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			/* Aligned, no carry: bulk-copy whole words and
			 * start a new partial word from the tail bytes. */
			u32 w = len >> 2;

			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		ipath_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		ipath_flush_wc();
	} else
		__raw_writel(last, piobuf);
}
/*
 * Convert an IB static rate to the inter-packet delay multiplier.
 * The multiplier is relative to the fastest supported rate (4xDDR),
 * so slower rates get proportionally larger values.  Unknown rates
 * map to 0 (no delay).
 */
unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
{
	if (rate == IB_RATE_2_5_GBPS)
		return 8;
	if (rate == IB_RATE_5_GBPS)
		return 4;
	if (rate == IB_RATE_10_GBPS)
		return 2;
	if (rate == IB_RATE_20_GBPS)
		return 1;
	return 0;
}
/*
 * Inverse of ipath_ib_rate_to_mult(): map a delay multiplier back to
 * the IB static rate it represents.  Unknown multipliers report the
 * port's current rate.
 */
static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
{
	if (mult == 8)
		return IB_RATE_2_5_GBPS;
	if (mult == 4)
		return IB_RATE_5_GBPS;
	if (mult == 2)
		return IB_RATE_10_GBPS;
	if (mult == 1)
		return IB_RATE_20_GBPS;
	return IB_RATE_PORT_CURRENT;
}
/*
 * Pop a transmit request descriptor from the device free list.
 * Returns NULL if the free list is empty.  Takes pending_lock with
 * interrupts disabled, so it is safe from any context.
 */
static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
{
	struct ipath_verbs_txreq *txp = NULL;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->pending_lock, irqflags);
	if (!list_empty(&dev->txreq_free)) {
		struct list_head *entry = dev->txreq_free.next;

		list_del(entry);
		txp = list_entry(entry, struct ipath_verbs_txreq, txreq.list);
	}
	spin_unlock_irqrestore(&dev->pending_lock, irqflags);
	return txp;
}
/*
 * Return a transmit request descriptor to the device free list.
 * Counterpart of get_txreq(); takes pending_lock with interrupts
 * disabled.
 */
static inline void put_txreq(struct ipath_ibdev *dev,
			     struct ipath_verbs_txreq *tx)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->pending_lock, irqflags);
	list_add(&tx->txreq.list, &dev->txreq_free);
	spin_unlock_irqrestore(&dev->pending_lock, irqflags);
}
/*
 * SDMA completion callback (runs when the hardware finishes a send).
 * Generates the work completion for the WQE (success or flush error),
 * restarts the send engine if more work is queued or a sender is
 * waiting for DMA to drain, frees any bounce buffer, recycles the
 * txreq, and drops the QP reference taken when the txreq was built.
 */
static void sdma_complete(void *cookie, int status)
{
	struct ipath_verbs_txreq *tx = cookie;
	struct ipath_qp *qp = tx->qp;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;
	enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
		IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;

	if (atomic_dec_and_test(&qp->s_dma_busy)) {
		/* Last outstanding DMA: complete, maybe kick the sender,
		 * and wake anyone waiting for DMA to go idle. */
		spin_lock_irqsave(&qp->s_lock, flags);
		if (tx->wqe)
			ipath_send_complete(qp, tx->wqe, ibs);
		if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
		     qp->s_last != qp->s_head) ||
		    (qp->s_flags & IPATH_S_WAIT_DMA))
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		wake_up(&qp->wait_dma);
	} else if (tx->wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		ipath_send_complete(qp, tx->wqe, ibs);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}

	/* Free the bounce buffer allocated by ipath_verbs_send_dma(). */
	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
		kfree(tx->txreq.map_addr);
	put_txreq(dev, tx);

	/* Drop the reference taken when this txreq was queued. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
/*
 * Drop one s_dma_busy reference after a failed SDMA queueing attempt.
 * If it was the last one, perform the same restart/wakeup handling as
 * the tail of sdma_complete().
 */
static void decrement_dma_busy(struct ipath_qp *qp)
{
	unsigned long flags;

	if (atomic_dec_and_test(&qp->s_dma_busy)) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
		     qp->s_last != qp->s_head) ||
		    (qp->s_flags & IPATH_S_WAIT_DMA))
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		wake_up(&qp->wait_dma);
	}
}
/*
 * Compute the number of clock cycles of delay before sending the next packet.
 * The multipliers reflect the number of clocks for the fastest rate so
 * one tick at 4xDDR is 8 ticks at 1xSDR.
 * If the destination port will take longer to receive a packet than
 * the outgoing link can send it, we need to delay sending the next packet
 * by the difference in time it takes the receiver to receive and the sender
 * to send this packet.
 * Note that this delay is always correct for UC and RC but not always
 * optimal for UD. For UD, the destination HCA can be different for each
 * packet, in which case, we could send packets to a different destination
 * while "waiting" for the delay. The overhead for doing this without
 * HW support is more than just paying the cost of delaying some packets
 * unnecessarily.
 */
static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
{
	unsigned delta;

	/* Receiver at least as fast as sender: no delay needed. */
	if (rcv_mult <= snd_mult)
		return 0;

	/* Half the per-word difference, rounded up. */
	delta = plen * (rcv_mult - snd_mult);
	return (delta + 1) >> 1;
}
/*
 * Send a packet using the SDMA engine.
 *
 * @qp: the sending QP
 * @hdr/@hdrwords: packet header and its length in 32-bit words
 * @ss/@len: payload scatter/gather state and length in bytes
 * @plen: total packet length in dwords, including the PBC
 * @dwords: payload length in dwords
 *
 * If the payload is suitably aligned it is DMAed directly from the SGE
 * buffers; otherwise it is copied into a kmalloc'ed bounce buffer that
 * sdma_complete() frees.  On queueing failure the constructed txreq is
 * parked in qp->s_tx and resent on the next call.
 *
 * Returns 0 on success, -EBUSY when no txreq or memory is available,
 * or the error from ipath_sdma_verbs_send().
 */
static int ipath_verbs_send_dma(struct ipath_qp *qp,
				struct ipath_ib_header *hdr, u32 hdrwords,
				struct ipath_sge_state *ss, u32 len,
				u32 plen, u32 dwords)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_devdata *dd = dev->dd;
	struct ipath_verbs_txreq *tx;
	u32 *piobuf;
	u32 control;
	u32 ndesc;
	int ret;

	tx = qp->s_tx;
	if (tx) {
		qp->s_tx = NULL;
		/* resend previously constructed packet */
		atomic_inc(&qp->s_dma_busy);
		ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
		if (ret) {
			/* Still busy: park it again for the next attempt. */
			qp->s_tx = tx;
			decrement_dma_busy(qp);
		}
		goto bail;
	}

	tx = get_txreq(dev);
	if (!tx) {
		ret = -EBUSY;
		goto bail;
	}

	/*
	 * Get the saved delay count we computed for the previous packet
	 * and save the delay count for this packet to be used next time
	 * we get here.
	 */
	control = qp->s_pkt_delay;
	qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);

	tx->qp = qp;
	atomic_inc(&qp->refcount);	/* dropped in sdma_complete() */
	tx->wqe = qp->s_wqe;
	tx->txreq.callback = sdma_complete;
	tx->txreq.callback_cookie = tx;
	tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
		IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
	if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
		tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;

	/* VL15 packets bypass credit check */
	if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
		control |= 1ULL << 31;
		tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
	}

	if (len) {
		/*
		 * Don't try to DMA if it takes more descriptors than
		 * the queue holds.
		 */
		ndesc = ipath_count_sge(ss, len);
		if (ndesc >= dd->ipath_sdma_descq_cnt)
			ndesc = 0;
	} else
		ndesc = 1;
	if (ndesc) {
		/* Aligned path: DMA the payload straight from the SGEs. */
		tx->hdr.pbc[0] = cpu_to_le32(plen);
		tx->hdr.pbc[1] = cpu_to_le32(control);
		memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
		tx->txreq.sg_count = ndesc;
		tx->map_len = (hdrwords + 2) << 2;
		tx->txreq.map_addr = &tx->hdr;
		atomic_inc(&qp->s_dma_busy);
		ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
		if (ret) {
			/* save ss and length in dwords */
			tx->ss = ss;
			tx->len = dwords;
			qp->s_tx = tx;
			decrement_dma_busy(qp);
		}
		goto bail;
	}

	/* Allocate a buffer and copy the header and payload to it. */
	tx->map_len = (plen + 1) << 2;
	piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
	if (unlikely(piobuf == NULL)) {
		ret = -EBUSY;
		goto err_tx;
	}
	tx->txreq.map_addr = piobuf;
	tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
	tx->txreq.sg_count = 1;

	*piobuf++ = (__force u32) cpu_to_le32(plen);
	*piobuf++ = (__force u32) cpu_to_le32(control);
	memcpy(piobuf, hdr, hdrwords << 2);
	ipath_copy_from_sge(piobuf + hdrwords, ss, len);

	atomic_inc(&qp->s_dma_busy);
	ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
	/*
	 * If we couldn't queue the DMA request, save the info
	 * and try again later rather than destroying the
	 * buffer and undoing the side effects of the copy.
	 */
	if (ret) {
		tx->ss = NULL;
		tx->len = 0;
		qp->s_tx = tx;
		decrement_dma_busy(qp);
	}
	dev->n_unaligned++;
	goto bail;

err_tx:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
	put_txreq(dev, tx);
bail:
	return ret;
}
/*
 * Send a packet using programmed I/O.
 *
 * @qp: the sending QP
 * @ibhdr/@hdrwords: packet header and its length in 32-bit words
 * @ss/@len: payload scatter/gather state and length in bytes
 * @plen: total packet length in dwords, including the PBC
 * @dwords: payload length in dwords
 *
 * Writes the PBC, header, and payload into a PIO send buffer.  On CPUs
 * with write-combining quirks (IPATH_PIO_FLUSH_WC) the last word of
 * the packet (the hardware trigger word) must be flushed separately;
 * the ordering of the ipath_flush_wc() calls below is load-bearing.
 *
 * Returns 0 on success or -EBUSY when no PIO buffer is available.
 */
static int ipath_verbs_send_pio(struct ipath_qp *qp,
				struct ipath_ib_header *ibhdr, u32 hdrwords,
				struct ipath_sge_state *ss, u32 len,
				u32 plen, u32 dwords)
{
	struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
	u32 *hdr = (u32 *) ibhdr;
	u32 __iomem *piobuf;
	unsigned flush_wc;
	u32 control;
	int ret;
	unsigned long flags;

	piobuf = ipath_getpiobuf(dd, plen, NULL);
	if (unlikely(piobuf == NULL)) {
		ret = -EBUSY;
		goto bail;
	}

	/*
	 * Get the saved delay count we computed for the previous packet
	 * and save the delay count for this packet to be used next time
	 * we get here.
	 */
	control = qp->s_pkt_delay;
	qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);

	/* VL15 packets bypass credit check */
	if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
		control |= 1ULL << 31;

	/*
	 * Write the length to the control qword plus any needed flags.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(((u64) control << 32) | plen, piobuf);
	piobuf += 2;

	flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		if (flush_wc) {
			ipath_flush_wc();
			__iowrite32_copy(piobuf, hdr, hdrwords - 1);
			ipath_flush_wc();
			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
			ipath_flush_wc();
		} else
			__iowrite32_copy(piobuf, hdr, hdrwords);
		goto done;
	}

	if (flush_wc)
		ipath_flush_wc();
	__iowrite32_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		if (flush_wc) {
			__iowrite32_copy(piobuf, addr, dwords - 1);
			/* must flush early everything before trigger word */
			ipath_flush_wc();
			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
			/* be sure trigger word is written */
			ipath_flush_wc();
		} else
			__iowrite32_copy(piobuf, addr, dwords);
		goto done;
	}
	/* Misaligned or multi-SGE payload: use the slow packing path. */
	copy_io(piobuf, ss, len, flush_wc);
done:
	/* PIO completes immediately; generate the work completion now. */
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
	ret = 0;
bail:
	return ret;
}
/**
 * ipath_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Chooses between PIO and SDMA and delegates to the appropriate
 * transmit routine.  Returns the routine's result.
 */
int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
		     u32 hdrwords, struct ipath_sge_state *ss, u32 len)
{
	struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
	u32 dwords = (len + 3) >> 2;
	/*
	 * Calculate the send buffer trigger address.
	 * The +1 counts for the pbc control dword following the pbc length.
	 */
	u32 plen = hdrwords + dwords + 1;

	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    !(dd->ipath_flags & IPATH_HAS_SEND_DMA))
		return ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
					    plen, dwords);

	return ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
				    plen, dwords);
}
/*
 * Snapshot the word/packet send/receive and transmit-wait counters.
 * Returns -EINVAL when the device is not initialized (no hardware,
 * freeze, etc.); otherwise fills all five output parameters and
 * returns 0.
 */
int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
			    u64 *rwords, u64 *spkts, u64 *rpkts,
			    u64 *xmit_wait)
{
	const struct ipath_cregs *cregs = dd->ipath_cregs;

	if (!(dd->ipath_flags & IPATH_INITTED))
		return -EINVAL;

	*swords = ipath_snap_cntr(dd, cregs->cr_wordsendcnt);
	*rwords = ipath_snap_cntr(dd, cregs->cr_wordrcvcnt);
	*spkts = ipath_snap_cntr(dd, cregs->cr_pktsendcnt);
	*rpkts = ipath_snap_cntr(dd, cregs->cr_pktrcvcnt);
	*xmit_wait = ipath_snap_cntr(dd, cregs->cr_sendstallcnt);
	return 0;
}
/**
 * ipath_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 * Returns 0 on success or -EINVAL if the hardware is not initialized.
 */
int ipath_get_counters(struct ipath_devdata *dd,
		       struct ipath_verbs_counters *cntrs)
{
	struct ipath_cregs const *crp = dd->ipath_cregs;
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
	cntrs->link_error_recovery_counter =
		ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection.  We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
	/*
	 * port_rcv_errors is the aggregate of every receive-side error
	 * counter the chip exposes, plus software-counted unsupported-VL
	 * flow control errors.
	 */
	cntrs->port_rcv_errors =
		ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
		ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
		ipath_snap_cntr(dd, crp->cr_portovflcnt) +
		ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
		ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
		ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
		ipath_snap_cntr(dd, crp->cr_erricrccnt) +
		ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
		ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
		ipath_snap_cntr(dd, crp->cr_badformatcnt) +
		dd->ipath_rxfc_unsupvl_errs;
	/* Some register offsets are zero on chips that lack the counter. */
	if (crp->cr_rxotherlocalphyerrcnt)
		cntrs->port_rcv_errors +=
			ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
	if (crp->cr_rxvlerrcnt)
		cntrs->port_rcv_errors +=
			ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
	cntrs->port_rcv_remphys_errors =
		ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
	cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
	cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
	cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
	cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
	cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
	/*
	 * Prefer the hardware counter when the chip has one; otherwise fall
	 * back to the software counts maintained by the interrupt handler.
	 */
	cntrs->local_link_integrity_errors =
		crp->cr_locallinkintegrityerrcnt ?
		ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
		((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
		 dd->ipath_lli_errs : dd->ipath_lli_errors);
	cntrs->excessive_buffer_overrun_errors =
		crp->cr_excessbufferovflcnt ?
		ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
		dd->ipath_overrun_thresh_errs;
	cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
		ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;
	ret = 0;

bail:
	return ret;
}
/**
 * ipath_ib_piobufavail - callback when a PIO buffer is available
 * @dev: the device pointer
 *
 * This is called from ipath_intr() at interrupt level when a PIO buffer is
 * available after ipath_verbs_send() returned an error that no buffers were
 * available. Return 1 if we consumed all the PIO buffers and we still have
 * QPs waiting for buffers (for now, just restart the send tasklet and
 * return zero).
 */
int ipath_ib_piobufavail(struct ipath_ibdev *dev)
{
	struct list_head *list;
	struct ipath_qp *qplist;
	struct ipath_qp *qp;
	unsigned long flags;

	if (dev == NULL)
		goto bail;

	list = &dev->piowait;
	qplist = NULL;

	/*
	 * Move every waiting QP onto a private singly-linked list (chained
	 * through pio_next) under the pending lock, taking a reference on
	 * each so none can be destroyed while we process them below.
	 */
	spin_lock_irqsave(&dev->pending_lock, flags);
	while (!list_empty(list)) {
		qp = list_entry(list->next, struct ipath_qp, piowait);
		list_del_init(&qp->piowait);
		qp->pio_next = qplist;
		qplist = qp;
		atomic_inc(&qp->refcount);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/* Reschedule sends without holding the pending lock. */
	while (qplist != NULL) {
		qp = qplist;
		qplist = qp->pio_next;

		spin_lock_irqsave(&qp->s_lock, flags);
		if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
			ipath_schedule_send(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify ipath_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}

bail:
	return 0;
}
/*
 * ipath_query_device - report device attributes to the IB midlayer.
 * Fills @props entirely from driver limits and per-device data; never fails.
 */
static int ipath_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);

	memset(props, 0, sizeof(*props));

	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	props->page_size_cap = PAGE_SIZE;
	/* Vendor OUI assembled from its three bytes. */
	props->vendor_id =
		IPATH_SRC_OUI_1 << 16 | IPATH_SRC_OUI_2 << 8 | IPATH_SRC_OUI_3;
	props->vendor_part_id = dev->dd->ipath_deviceid;
	props->hw_ver = dev->dd->ipath_pcirev;
	props->sys_image_guid = dev->sys_image_guid;
	/* No hardware limit on MR size. */
	props->max_mr_size = ~0ull;
	/* Remaining limits come from module parameters / table sizes. */
	props->max_qp = ib_ipath_max_qps;
	props->max_qp_wr = ib_ipath_max_qp_wrs;
	props->max_sge = ib_ipath_max_sges;
	props->max_cq = ib_ipath_max_cqs;
	props->max_ah = ib_ipath_max_ahs;
	props->max_cqe = ib_ipath_max_cqes;
	props->max_mr = dev->lk_table.max;
	props->max_fmr = dev->lk_table.max;
	props->max_map_per_fmr = 32767;
	props->max_pd = ib_ipath_max_pds;
	props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
	props->max_qp_init_rd_atom = 255;
	/* props->max_res_rd_atom */
	props->max_srq = ib_ipath_max_srqs;
	props->max_srq_wr = ib_ipath_max_srq_wrs;
	props->max_srq_sge = ib_ipath_max_srq_sges;
	/* props->local_ca_ack_delay */
	props->atomic_cap = IB_ATOMIC_GLOB;
	props->max_pkeys = ipath_get_npkeys(dev->dd);
	props->max_mcast_grp = ib_ipath_max_mcast_grps;
	props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;

	return 0;
}
/*
 * Map chip link-training state (IBCS LT state) to the IB PortPhysicalState
 * values reported in PortInfo.  Indexed by the masked lastibcstat value.
 */
const u8 ipath_cvt_physportstate[32] = {
	[INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	/*
	 * 0x10..0x17 have no named IBCS constants here; presumably reserved
	 * training states — mapped to CFG_TRAIN. TODO confirm against the
	 * chip documentation.
	 */
	[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};
/* Read the chip's bad-PKEY error counter register for @dd. */
u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
{
	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
}
/*
 * ipath_query_port - report IB port attributes from cached device state.
 * Always returns 0; @port is unused since the device has a single port.
 */
static int ipath_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_devdata *dd = dev->dd;
	enum ib_mtu mtu;
	u16 lid = dd->ipath_lid;
	u64 ibcstat;

	memset(props, 0, sizeof(*props));
	/* A LID of 0 means none was assigned yet; report the permissive LID. */
	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = dd->ipath_lmc;
	props->sm_lid = dev->sm_lid;
	props->sm_sl = dev->sm_sl;
	ibcstat = dd->ipath_lastibcstat;
	/* map LinkState to IB portinfo values. */
	props->state = ipath_ib_linkstate(dd, ibcstat) + 1;
	/* See phys_state_show() */
	props->phys_state = /* MEA: assumes shift == 0 */
		ipath_cvt_physportstate[dd->ipath_lastibcstat &
					dd->ibcs_lts_mask];
	props->port_cap_flags = dev->port_cap_flags;
	props->gid_tbl_len = 1;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = ipath_get_npkeys(dd);
	/* Report violations relative to the snapshot taken at reset. */
	props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
		dev->z_pkey_violations;
	props->qkey_viol_cntr = dev->qkey_violations;
	props->active_width = dd->ipath_link_width_active;
	/* See rate_show() */
	props->active_speed = dd->ipath_link_speed_active;
	props->max_vl_num = 1;		/* VLCap = VL0 */
	props->init_type_reply = 0;

	props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
	/* Translate the byte MTU into the IB MTU enumeration. */
	switch (dd->ipath_ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		/* Unexpected value; fall back to the common default. */
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = dev->subnet_timeout;

	return 0;
}
/*
 * ipath_modify_device - apply device-level modifications.
 * Only the node description and system image GUID can be changed; any
 * other mask bit yields -EOPNOTSUPP.
 */
static int ipath_modify_device(struct ib_device *device,
			       int device_modify_mask,
			       struct ib_device_modify *device_modify)
{
	/* Reject any modification beyond the two we support. */
	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC))
		return -EOPNOTSUPP;

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
		memcpy(device->node_desc, device_modify->node_desc, 64);

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		to_idev(device)->sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);

	return 0;
}
/*
 * ipath_modify_port - apply port capability / state modifications.
 * Capability set bits are applied before clear bits.  Always returns 0.
 */
static int ipath_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct ipath_ibdev *dev = to_idev(ibdev);

	dev->port_cap_flags |= props->set_port_cap_mask;
	dev->port_cap_flags &= ~props->clr_port_cap_mask;
	/* IB_PORT_SHUTDOWN forces the physical link down. */
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		dev->qkey_violations = 0;
	return 0;
}
/*
 * ipath_query_gid - return the GID for the given table index.
 * Only index 0 exists; the GID is the subnet prefix plus the device GUID.
 */
static int ipath_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ipath_ibdev *dev = to_idev(ibdev);

	/* The GID table has exactly one entry. */
	if (index >= 1)
		return -EINVAL;

	gid->global.subnet_prefix = dev->gid_prefix;
	gid->global.interface_id = dev->dd->ipath_guid;

	return 0;
}
/*
 * ipath_alloc_pd - allocate a protection domain.
 * Returns the embedded ib_pd on success, or an ERR_PTR(-ENOMEM) on
 * allocation failure or when the (artificial) PD limit is reached.
 */
static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_pd *newpd;

	/*
	 * This is actually totally arbitrary.  Some correctness tests
	 * assume there's a maximum number of PDs that can be allocated.
	 * We don't actually have this limit, but we fail the test if
	 * we allow allocations of more than we report for this value.
	 */
	newpd = kmalloc(sizeof *newpd, GFP_KERNEL);
	if (!newpd)
		return ERR_PTR(-ENOMEM);

	spin_lock(&dev->n_pds_lock);
	if (dev->n_pds_allocated == ib_ipath_max_pds) {
		spin_unlock(&dev->n_pds_lock);
		kfree(newpd);
		return ERR_PTR(-ENOMEM);
	}
	dev->n_pds_allocated++;
	spin_unlock(&dev->n_pds_lock);

	/* ib_alloc_pd() will initialize pd->ibpd. */
	newpd->user = udata != NULL;

	return &newpd->ibpd;
}
/* Free a protection domain and release its slot in the PD count. */
static int ipath_dealloc_pd(struct ib_pd *ibpd)
{
	struct ipath_pd *pd = to_ipd(ibpd);
	struct ipath_ibdev *dev = to_idev(ibpd->device);

	spin_lock(&dev->n_pds_lock);
	dev->n_pds_allocated--;
	spin_unlock(&dev->n_pds_lock);

	kfree(pd);

	return 0;
}
/**
 * ipath_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context, hence the GFP_ATOMIC
 * allocation.  Returns the embedded ib_ah on success or an ERR_PTR.
 */
static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
				     struct ib_ah_attr *ah_attr)
{
	struct ipath_ibdev *dev = to_idev(pd->device);
	struct ipath_ah *ah;
	unsigned long flags;

	/* A multicast address requires a GRH (see ch. 8.4.1). */
	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
	    ah_attr->dlid != IPATH_PERMISSIVE_LID &&
	    !(ah_attr->ah_flags & IB_AH_GRH))
		return ERR_PTR(-EINVAL);

	/* DLID 0 is never a valid destination. */
	if (ah_attr->dlid == 0)
		return ERR_PTR(-EINVAL);

	if (ah_attr->port_num < 1 ||
	    ah_attr->port_num > pd->device->phys_port_cnt)
		return ERR_PTR(-EINVAL);

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	/* Enforce the module-parameter limit on the number of AHs. */
	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
		kfree(ah);
		return ERR_PTR(-ENOMEM);
	}
	dev->n_ahs_allocated++;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	/* ib_create_ah() will initialize ah->ibah. */
	ah->attr = *ah_attr;
	ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);

	return &ah->ibah;
}
/**
 * ipath_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.  Releases the slot counted
 * in ipath_create_ah() and frees the AH.  Always returns 0.
 */
static int ipath_destroy_ah(struct ib_ah *ibah)
{
	struct ipath_ibdev *dev = to_idev(ibah->device);
	struct ipath_ah *ah = to_iah(ibah);
	unsigned long flags;

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	dev->n_ahs_allocated--;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	kfree(ah);

	return 0;
}
/*
 * ipath_query_ah - return the stored AH attributes, converting the
 * internal rate multiplier back to the IB rate enumeration.
 */
static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct ipath_ah *ah = to_iah(ibah);

	*ah_attr = ah->attr;
	ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);

	return 0;
}
/**
 * ipath_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
 */
unsigned ipath_get_npkeys(struct ipath_devdata *dd)
{
	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
}
/**
 * ipath_get_pkey - return the indexed PKEY from the port PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
 *
 * Returns 0 for an out-of-range index.
 */
unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
{
	/* always a kernel port, no locking needed */
	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
		return 0;

	return dd->ipath_pd[0]->port_pkeys[index];
}
/*
 * ipath_query_pkey - look up a PKEY table entry for the IB midlayer.
 * Returns -EINVAL for an out-of-range index, 0 otherwise.
 */
static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ipath_ibdev *dev = to_idev(ibdev);

	if (index >= ipath_get_npkeys(dev->dd))
		return -EINVAL;

	*pkey = ipath_get_pkey(dev->dd, index);

	return 0;
}
/**
 * ipath_alloc_ucontext - allocate a ucontest
 * @ibdev: the infiniband device
 * @udata: not used by the InfiniPath driver
 */
static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct ipath_ucontext *context;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	return &context->ibucontext;
}
/* Free the user context allocated in ipath_alloc_ucontext(). */
static int ipath_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}
static int ipath_verbs_register_sysfs(struct ib_device *dev);
/*
 * Periodic verbs timer body.  Runs once per jiffy (see enable_timer())
 * until disable_timer() deletes it.
 */
static void __verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/* Handle verbs layer timeouts. */
	ipath_ib_timer(dd->verbs_dev);

	/* Re-arm for the next jiffy. */
	mod_timer(&dd->verbs_timer, jiffies + 1);
}
/* Start the per-jiffy verbs timer; optionally enable the GPIO receive
 * interrupt on chips that support it.  Always returns 0.
 */
static int enable_timer(struct ipath_devdata *dd)
{
	/*
	 * Early chips had a design flaw where the chip and kernel idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * processing.
	 */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		/* Magic debug-port-select value; meaning not documented here
		 * — presumably chip-specific, see the hardware manual. */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
				 0x2074076542310ULL);
		/* Enable GPIO bit 2 interrupt */
		dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 dd->ipath_gpio_mask);
	}

	init_timer(&dd->verbs_timer);
	dd->verbs_timer.function = __verbs_timer;
	dd->verbs_timer.data = (unsigned long)dd;
	dd->verbs_timer.expires = jiffies + 1;
	add_timer(&dd->verbs_timer);

	return 0;
}
/* Stop the verbs timer and undo the GPIO interrupt enable done in
 * enable_timer().  Always returns 0.
 */
static int disable_timer(struct ipath_devdata *dd)
{
	/* Disable GPIO bit 2 interrupt */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 dd->ipath_gpio_mask);
		/*
		 * We might want to undo changes to debugportselect,
		 * but how?
		 */
	}

	del_timer_sync(&dd->verbs_timer);

	return 0;
}
/**
 * ipath_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 *
 * Allocates the ipath_ibdev, initializes all verbs state, and registers
 * with the IB midlayer.  On success dd->verbs_dev points at the new
 * device and 0 is returned; on failure dd->verbs_dev is set to NULL and
 * a negative errno is returned.
 *
 * Fix vs. original: the return value of ipath_verbs_register_sysfs() was
 * discarded, so a sysfs registration failure tore everything down but
 * still returned 0 (success) to the caller while leaving dd->verbs_dev
 * NULL.  The error code is now captured and propagated.
 */
int ipath_register_ib_device(struct ipath_devdata *dd)
{
	struct ipath_verbs_counters cntrs;
	struct ipath_ibdev *idev;
	struct ib_device *dev;
	struct ipath_verbs_txreq *tx;
	unsigned i;
	int ret;

	idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
	if (idev == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	dev = &idev->ibdev;

	/* SDMA-capable chips get a pool of tx request descriptors. */
	if (dd->ipath_sdma_descq_cnt) {
		tx = kmalloc(dd->ipath_sdma_descq_cnt * sizeof *tx,
			     GFP_KERNEL);
		if (tx == NULL) {
			ret = -ENOMEM;
			goto err_tx;
		}
	} else
		tx = NULL;
	idev->txreq_bufs = tx;

	/* Only need to initialize non-zero fields. */
	spin_lock_init(&idev->n_pds_lock);
	spin_lock_init(&idev->n_ahs_lock);
	spin_lock_init(&idev->n_cqs_lock);
	spin_lock_init(&idev->n_qps_lock);
	spin_lock_init(&idev->n_srqs_lock);
	spin_lock_init(&idev->n_mcast_grps_lock);

	spin_lock_init(&idev->qp_table.lock);
	spin_lock_init(&idev->lk_table.lock);
	idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);

	ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
	if (ret)
		goto err_qp;

	/*
	 * The top ib_ipath_lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
	idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
	idev->lk_table.table = kzalloc(idev->lk_table.max *
				       sizeof(*idev->lk_table.table),
				       GFP_KERNEL);
	if (idev->lk_table.table == NULL) {
		ret = -ENOMEM;
		goto err_lk;
	}
	INIT_LIST_HEAD(&idev->pending_mmaps);
	spin_lock_init(&idev->pending_lock);
	idev->mmap_offset = PAGE_SIZE;
	spin_lock_init(&idev->mmap_offset_lock);
	INIT_LIST_HEAD(&idev->pending[0]);
	INIT_LIST_HEAD(&idev->pending[1]);
	INIT_LIST_HEAD(&idev->pending[2]);
	INIT_LIST_HEAD(&idev->piowait);
	INIT_LIST_HEAD(&idev->rnrwait);
	INIT_LIST_HEAD(&idev->txreq_free);
	idev->pending_index = 0;
	idev->port_cap_flags =
		IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
	if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
		idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	/* Snapshot current HW counters to "clear" them. */
	ipath_get_counters(dd, &cntrs);
	idev->z_symbol_error_counter = cntrs.symbol_error_counter;
	idev->z_link_error_recovery_counter =
		cntrs.link_error_recovery_counter;
	idev->z_link_downed_counter = cntrs.link_downed_counter;
	idev->z_port_rcv_errors = cntrs.port_rcv_errors;
	idev->z_port_rcv_remphys_errors =
		cntrs.port_rcv_remphys_errors;
	idev->z_port_xmit_discards = cntrs.port_xmit_discards;
	idev->z_port_xmit_data = cntrs.port_xmit_data;
	idev->z_port_rcv_data = cntrs.port_rcv_data;
	idev->z_port_xmit_packets = cntrs.port_xmit_packets;
	idev->z_port_rcv_packets = cntrs.port_rcv_packets;
	idev->z_local_link_integrity_errors =
		cntrs.local_link_integrity_errors;
	idev->z_excessive_buffer_overrun_errors =
		cntrs.excessive_buffer_overrun_errors;
	idev->z_vl15_dropped = cntrs.vl15_dropped;

	/* Put the preallocated tx requests on the free list. */
	for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
		list_add(&tx->txreq.list, &idev->txreq_free);

	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!sys_image_guid)
		sys_image_guid = dd->ipath_guid;
	idev->sys_image_guid = sys_image_guid;
	idev->ib_unit = dd->ipath_unit;
	idev->dd = dd;

	strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
	dev->owner = THIS_MODULE;
	dev->node_guid = dd->ipath_guid;
	dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_AH)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_POLL_CQ)		|
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_POST_SEND)		|
		(1ull << IB_USER_VERBS_CMD_POST_RECV)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = 1;
	dev->dma_device = &dd->pcidev->dev;
	dev->query_device = ipath_query_device;
	dev->modify_device = ipath_modify_device;
	dev->query_port = ipath_query_port;
	dev->modify_port = ipath_modify_port;
	dev->query_pkey = ipath_query_pkey;
	dev->query_gid = ipath_query_gid;
	dev->alloc_ucontext = ipath_alloc_ucontext;
	dev->dealloc_ucontext = ipath_dealloc_ucontext;
	dev->alloc_pd = ipath_alloc_pd;
	dev->dealloc_pd = ipath_dealloc_pd;
	dev->create_ah = ipath_create_ah;
	dev->destroy_ah = ipath_destroy_ah;
	dev->query_ah = ipath_query_ah;
	dev->create_srq = ipath_create_srq;
	dev->modify_srq = ipath_modify_srq;
	dev->query_srq = ipath_query_srq;
	dev->destroy_srq = ipath_destroy_srq;
	dev->create_qp = ipath_create_qp;
	dev->modify_qp = ipath_modify_qp;
	dev->query_qp = ipath_query_qp;
	dev->destroy_qp = ipath_destroy_qp;
	dev->post_send = ipath_post_send;
	dev->post_recv = ipath_post_receive;
	dev->post_srq_recv = ipath_post_srq_receive;
	dev->create_cq = ipath_create_cq;
	dev->destroy_cq = ipath_destroy_cq;
	dev->resize_cq = ipath_resize_cq;
	dev->poll_cq = ipath_poll_cq;
	dev->req_notify_cq = ipath_req_notify_cq;
	dev->get_dma_mr = ipath_get_dma_mr;
	dev->reg_phys_mr = ipath_reg_phys_mr;
	dev->reg_user_mr = ipath_reg_user_mr;
	dev->dereg_mr = ipath_dereg_mr;
	dev->alloc_fmr = ipath_alloc_fmr;
	dev->map_phys_fmr = ipath_map_phys_fmr;
	dev->unmap_fmr = ipath_unmap_fmr;
	dev->dealloc_fmr = ipath_dealloc_fmr;
	dev->attach_mcast = ipath_multicast_attach;
	dev->detach_mcast = ipath_multicast_detach;
	dev->process_mad = ipath_process_mad;
	dev->mmap = ipath_mmap;
	dev->dma_ops = &ipath_dma_mapping_ops;

	snprintf(dev->node_desc, sizeof(dev->node_desc),
		 IPATH_IDSTR " %s", init_utsname()->nodename);

	ret = ib_register_device(dev);
	if (ret)
		goto err_reg;

	/*
	 * Capture the error code: the original code ignored it and
	 * returned success after tearing everything down.
	 */
	ret = ipath_verbs_register_sysfs(dev);
	if (ret)
		goto err_class;

	enable_timer(dd);

	goto bail;

err_class:
	ib_unregister_device(dev);
err_reg:
	kfree(idev->lk_table.table);
err_lk:
	kfree(idev->qp_table.table);
err_qp:
	kfree(idev->txreq_bufs);
err_tx:
	ib_dealloc_device(dev);
	ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
	idev = NULL;

bail:
	dd->verbs_dev = idev;
	return ret;
}
/*
 * ipath_unregister_ib_device - tear down the verbs device registered by
 * ipath_register_ib_device(), reporting (but not fixing) any state that
 * should have been drained before unregistration.
 */
void ipath_unregister_ib_device(struct ipath_ibdev *dev)
{
	struct ib_device *ibdev = &dev->ibdev;
	u32 qps_inuse;

	ib_unregister_device(ibdev);

	disable_timer(dev->dd);

	/* These lists should be empty once the midlayer is gone. */
	if (!list_empty(&dev->pending[0]) ||
	    !list_empty(&dev->pending[1]) ||
	    !list_empty(&dev->pending[2]))
		ipath_dev_err(dev->dd, "pending list not empty!\n");
	if (!list_empty(&dev->piowait))
		ipath_dev_err(dev->dd, "piowait list not empty!\n");
	if (!list_empty(&dev->rnrwait))
		ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
	if (!ipath_mcast_tree_empty())
		ipath_dev_err(dev->dd, "multicast table memory leak!\n");
	/*
	 * Note that ipath_unregister_ib_device() can be called before all
	 * the QPs are destroyed!
	 */
	qps_inuse = ipath_free_all_qps(&dev->qp_table);
	if (qps_inuse)
		ipath_dev_err(dev->dd, "QP memory leak! %u still in use\n",
			      qps_inuse);
	kfree(dev->qp_table.table);
	kfree(dev->lk_table.table);
	kfree(dev->txreq_bufs);
	ib_dealloc_device(ibdev);
}
/* sysfs "hw_rev" attribute: print the PCI revision in hex. */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct ipath_ibdev *dev =
		container_of(device, struct ipath_ibdev, ibdev.dev);

	return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
}
/*
 * sysfs "hca_type"/"board_id" attribute: print the board name followed
 * by a newline.  Propagates a negative error from the board-name hook.
 */
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct ipath_ibdev *dev =
		container_of(device, struct ipath_ibdev, ibdev.dev);
	int ret;

	/* 128 is the buffer size the boardname hook expects here. */
	ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
	if (ret < 0)
		goto bail;
	strcat(buf, "\n");
	ret = strlen(buf);

bail:
	return ret;
}
/*
 * sysfs "stats" attribute: dump the software verbs counters followed by
 * one line per non-zero opcode statistic ("<opcode> packets/bytes").
 */
static ssize_t show_stats(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct ipath_ibdev *dev =
		container_of(device, struct ipath_ibdev, ibdev.dev);
	int i;
	int len;

	len = sprintf(buf,
		      "RC resends  %d\n"
		      "RC no QACK  %d\n"
		      "RC ACKs     %d\n"
		      "RC SEQ NAKs %d\n"
		      "RC RDMA seq %d\n"
		      "RC RNR NAKs %d\n"
		      "RC OTH NAKs %d\n"
		      "RC timeouts %d\n"
		      "RC RDMA dup %d\n"
		      "piobuf wait %d\n"
		      "unaligned   %d\n"
		      "PKT drops   %d\n"
		      "WQE errs    %d\n",
		      dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
		      dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
		      dev->n_other_naks, dev->n_timeouts,
		      dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned,
		      dev->n_pkt_drops, dev->n_wqe_errs);
	for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
		const struct ipath_opcode_stats *si = &dev->opstats[i];

		/* Skip opcodes never seen to keep the output short. */
		if (!si->n_packets && !si->n_bytes)
			continue;
		len += sprintf(buf + len, "%02x %llu/%llu\n", i,
			       (unsigned long long) si->n_packets,
			       (unsigned long long) si->n_bytes);
	}
	return len;
}
/* Read-only sysfs attributes registered in ipath_verbs_register_sysfs(). */
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);

static struct device_attribute *ipath_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_stats
};
/*
 * ipath_verbs_register_sysfs - create the driver's sysfs attribute files.
 *
 * Returns 0 on success or the negative errno from device_create_file().
 *
 * Fixes vs. original: the original returned a constant 1 (losing the real
 * error code) and leaked any attribute files already created before the
 * failing one.  On failure we now unwind by removing all attributes
 * (removing one that was never created is harmless) and propagate the
 * errno.  Callers that only test for non-zero are unaffected.
 */
static int ipath_verbs_register_sysfs(struct ib_device *dev)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) {
		ret = device_create_file(&dev->dev,
					 ipath_class_attributes[i]);
		if (ret)
			goto bail;
	}
	return 0;

bail:
	/* Unwind the files created before the failure. */
	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
		device_remove_file(&dev->dev, ipath_class_attributes[i]);
	return ret;
}
| gpl-2.0 |
savoca/kernel-msm | drivers/mtd/ofpart.c | 1602 | 4082 | /*
* Flash partitions described by the OF (or flattened) device tree
*
* Copyright © 2006 MontaVista Software Inc.
* Author: Vitaly Wool <vwool@ru.mvista.com>
*
* Revised to handle newer style flash binding by:
* Copyright © 2007 David Gibson, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/mtd/partitions.h>
/*
 * Parse new-style MTD partitions from device-tree child nodes.
 *
 * Each child node of the flash node with a "reg" property becomes one
 * partition; "label" (or "name") supplies the partition name and a
 * "read-only" property sets MTD_WRITEABLE in mask_flags (masking out
 * write access).  Returns the number of partitions found, 0 when there
 * is nothing to parse, or a negative errno.
 */
static int parse_ofpart_partitions(struct mtd_info *master,
				   struct mtd_partition **pparts,
				   struct mtd_part_parser_data *data)
{
	struct device_node *node;
	const char *partname;
	struct device_node *pp;
	int nr_parts, i;

	if (!data)
		return 0;

	node = data->of_node;
	if (!node)
		return 0;

	/* First count the subnodes */
	pp = NULL;
	nr_parts = 0;
	while ((pp = of_get_next_child(node, pp)))
		nr_parts++;

	if (nr_parts == 0)
		return 0;

	*pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
	if (!*pparts)
		return -ENOMEM;

	/* of_get_next_child() drops the previous node's reference for us. */
	pp = NULL;
	i = 0;
	while ((pp = of_get_next_child(node, pp))) {
		const __be32 *reg;
		int len;

		reg = of_get_property(pp, "reg", &len);
		if (!reg) {
			/* Not a partition node; shrink the reported count. */
			nr_parts--;
			continue;
		}

		/* NOTE(review): assumes #address-cells = #size-cells = 1
		 * (one cell each for offset and size) — confirm for the
		 * target platforms. */
		(*pparts)[i].offset = be32_to_cpu(reg[0]);
		(*pparts)[i].size = be32_to_cpu(reg[1]);

		partname = of_get_property(pp, "label", &len);
		if (!partname)
			partname = of_get_property(pp, "name", &len);
		(*pparts)[i].name = (char *)partname;

		if (of_get_property(pp, "read-only", &len))
			(*pparts)[i].mask_flags = MTD_WRITEABLE;

		i++;
	}

	if (!i) {
		/* pp is already NULL here; of_node_put(NULL) is a no-op. */
		of_node_put(pp);
		pr_err("No valid partition found on %s\n", node->full_name);
		kfree(*pparts);
		*pparts = NULL;
		return -EINVAL;
	}

	return nr_parts;
}
/* New-style device-tree partition parser registration record. */
static struct mtd_part_parser ofpart_parser = {
	.owner = THIS_MODULE,
	.parse_fn = parse_ofpart_partitions,
	.name = "ofpart",
};
/*
 * Parse obsolete-style MTD partitions from a flat "partitions" property:
 * an array of (offset, length) cell pairs, with bit 0 of the length
 * marking a read-only partition.  Names come from the NUL-separated
 * "partition-names" property when present.  Returns the number of
 * partitions, 0 if the property is absent, or a negative errno.
 */
static int parse_ofoldpart_partitions(struct mtd_info *master,
				      struct mtd_partition **pparts,
				      struct mtd_part_parser_data *data)
{
	struct device_node *dp;
	int i, plen, nr_parts;
	const struct {
		__be32 offset, len;
	} *part;
	const char *names;

	if (!data)
		return 0;

	dp = data->of_node;
	if (!dp)
		return 0;

	part = of_get_property(dp, "partitions", &plen);
	if (!part)
		return 0; /* No partitions found */

	pr_warning("Device tree uses obsolete partition map binding: %s\n",
		   dp->full_name);

	nr_parts = plen / sizeof(part[0]);

	*pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
	if (!*pparts)
		return -ENOMEM;

	names = of_get_property(dp, "partition-names", &plen);

	for (i = 0; i < nr_parts; i++) {
		(*pparts)[i].offset = be32_to_cpu(part->offset);
		/* Clear bit 0 of the length; it is a flag, not size. */
		(*pparts)[i].size = be32_to_cpu(part->len) & ~1;
		/* bit 0 set signifies read only partition */
		if (be32_to_cpu(part->len) & 1)
			(*pparts)[i].mask_flags = MTD_WRITEABLE;

		/* Walk the NUL-separated name list alongside the array. */
		if (names && (plen > 0)) {
			int len = strlen(names) + 1;

			(*pparts)[i].name = (char *)names;
			plen -= len;
			names += len;
		} else {
			(*pparts)[i].name = "unnamed";
		}

		part++;
	}

	return nr_parts;
}
/* Obsolete-style ("partitions" property) parser registration record. */
static struct mtd_part_parser ofoldpart_parser = {
	.owner = THIS_MODULE,
	.parse_fn = parse_ofoldpart_partitions,
	.name = "ofoldpart",
};
/*
 * Register both the new-style and obsolete-style parsers.
 *
 * Fix vs. original: when registering ofoldpart_parser failed, the code
 * called deregister_mtd_parser(&ofoldpart_parser) — i.e. it deregistered
 * the parser whose registration had just failed — leaving ofpart_parser
 * registered even though module init reported failure.  The unwind must
 * deregister &ofpart_parser, the one that registered successfully.
 */
static int __init ofpart_parser_init(void)
{
	int rc;

	rc = register_mtd_parser(&ofpart_parser);
	if (rc)
		goto out;

	rc = register_mtd_parser(&ofoldpart_parser);
	if (!rc)
		return 0;

	/* Second registration failed: undo the first one. */
	deregister_mtd_parser(&ofpart_parser);
out:
	return rc;
}
/* Register both parsers at module load; see ofpart_parser_init(). */
module_init(ofpart_parser_init);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
MODULE_AUTHOR("Vitaly Wool, David Gibson");
/*
 * When MTD core cannot find the requested parser, it tries to load the module
 * with the same name. Since we provide the ofoldpart parser, we should have
 * the corresponding alias.
 */
MODULE_ALIAS("ofoldpart");
| gpl-2.0 |
myjang0507/Alphabet | drivers/i2c/busses/i2c-sis630.c | 2370 | 14915 | /*
Copyright (c) 2002,2003 Alexander Malysh <amalysh@web.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Status: beta
Supports:
SIS 630
SIS 730
SIS 964
Notable differences between chips:
+------------------------+--------------------+-------------------+
| | SIS630/730 | SIS964 |
+------------------------+--------------------+-------------------+
| Clock | 14kHz/56kHz | 55.56kHz/27.78kHz |
| SMBus registers offset | 0x80 | 0xE0 |
| SMB_CNT | Bit 1 = Slave Busy | Bit 1 = Bus probe |
| (not used yet) | Bit 3 is reserved | Bit 3 = Last byte |
| SMB_PCOUNT | Offset + 0x06 | Offset + 0x14 |
| SMB_COUNT | 4:0 bits | 5:0 bits |
+------------------------+--------------------+-------------------+
(Other differences don't affect the functions provided by the driver)
Note: we assume there can only be one device, with one SMBus interface.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/io.h>
/* SIS964 id is defined here as we are the only file using it */
#define PCI_DEVICE_ID_SI_964 0x0964
/* SIS630/730/964 SMBus registers */
#define SMB_STS 0x00 /* status */
#define SMB_CNT 0x02 /* control */
#define SMBHOST_CNT 0x03 /* host control */
#define SMB_ADDR 0x04 /* address */
#define SMB_CMD 0x05 /* command */
#define SMB_COUNT 0x07 /* byte count */
#define SMB_BYTE 0x08 /* ~0x8F data byte field */
/* SMB_STS register */
#define BYTE_DONE_STS 0x10 /* Byte Done Status / Block Array */
#define SMBCOL_STS 0x04 /* Collision */
#define SMBERR_STS 0x02 /* Device error */
/* SMB_CNT register */
#define MSTO_EN 0x40 /* Host Master Timeout Enable */
#define SMBCLK_SEL 0x20 /* Host master clock selection */
#define SMB_PROBE 0x02 /* Bus Probe/Slave busy */
#define SMB_HOSTBUSY 0x01 /* Host Busy */
/* SMBHOST_CNT register */
#define SMB_KILL 0x20 /* Kill */
#define SMB_START 0x10 /* Start */
/* register count for request_region
* As we don't use SMB_PCOUNT, 20 is ok for SiS630 and SiS964
*/
#define SIS630_SMB_IOREGION 20
/* PCI address constants */
/* acpi base address register */
#define SIS630_ACPI_BASE_REG 0x74
/* bios control register */
#define SIS630_BIOS_CTL_REG 0x40
/* Other settings */
#define MAX_TIMEOUT 500
/* SIS630 constants */
#define SIS630_QUICK 0x00
#define SIS630_BYTE 0x01
#define SIS630_BYTE_DATA 0x02
#define SIS630_WORD_DATA 0x03
#define SIS630_PCALL 0x04
#define SIS630_BLOCK_DATA 0x05
/*
 * Forward declaration: the driver struct is defined at the bottom of the
 * file, but sis630_setup() needs its .name for I/O region bookkeeping.
 */
static struct pci_driver sis630_driver;

/* insmod parameters */
static bool high_clock;
static bool force;
module_param(high_clock, bool, 0);
MODULE_PARM_DESC(high_clock,
	"Set Host Master Clock to 56KHz (default 14KHz) (SIS630/730 only).");
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Forcibly enable the SIS630. DANGEROUS!");

/* SMBus base address (I/O port); 0 until sis630_setup() succeeds */
static unsigned short smbus_base;

/* PCI device IDs of supported SiS bridges, zero-terminated */
static int supported[] = {
	PCI_DEVICE_ID_SI_630,
	PCI_DEVICE_ID_SI_730,
	PCI_DEVICE_ID_SI_760,
	0 /* terminates the list */
};
/* Read one byte from the SMBus register at offset @reg from smbus_base. */
static inline u8 sis630_read(u8 reg)
{
	unsigned short port = smbus_base + reg;

	return inb(port);
}
/* Write @data to the SMBus register at offset @reg from smbus_base. */
static inline void sis630_write(u8 reg, u8 data)
{
	unsigned short port = smbus_base + reg;

	outb(data, port);
}
/*
 * Prepare the host controller and kick off one SMBus transaction.
 *
 * @size is one of the SIS630_* protocol codes (bits 2:0 of SMBHOST_CNT).
 * The pre-transaction SMB_CNT value is returned via @oldclock so the
 * caller can restore the clock selection in sis630_transaction_end().
 * Returns 0 on success, -EBUSY if the host stays busy even after a kill.
 */
static int sis630_transaction_start(struct i2c_adapter *adap, int size,
				    u8 *oldclock)
{
	int temp;

	/* Make sure the SMBus host is ready to start transmitting. */
	temp = sis630_read(SMB_CNT);
	if ((temp & (SMB_PROBE | SMB_HOSTBUSY)) != 0x00) {
		dev_dbg(&adap->dev, "SMBus busy (%02x). Resetting...\n", temp);
		/* kill the pending smbus transaction */
		sis630_write(SMBHOST_CNT, SMB_KILL);
		temp = sis630_read(SMB_CNT);
		if (temp & (SMB_PROBE | SMB_HOSTBUSY)) {
			dev_dbg(&adap->dev, "Failed! (%02x)\n", temp);
			return -EBUSY;
		} else {
			dev_dbg(&adap->dev, "Successful!\n");
		}
	}

	/* save the old clock setting so we can restore it afterwards */
	*oldclock = sis630_read(SMB_CNT);
	dev_dbg(&adap->dev, "saved clock 0x%02x\n", *oldclock);

	/*
	 * Disable the host master timeout interrupt; switch the Host
	 * Master Clock to 56KHz if the high_clock parameter asked for it.
	 */
	if (high_clock)
		sis630_write(SMB_CNT, SMBCLK_SEL);
	else
		sis630_write(SMB_CNT, (*oldclock & ~MSTO_EN));

	/* clear all sticky status bits left over from earlier transfers */
	temp = sis630_read(SMB_STS);
	sis630_write(SMB_STS, temp & 0x1e);

	/* start the transaction by setting bit 4 (SMB_START) and the size */
	sis630_write(SMBHOST_CNT, SMB_START | (size & 0x07));

	return 0;
}
/*
 * Poll SMB_STS until the transaction completes, errors out, or
 * MAX_TIMEOUT milliseconds elapse.  For block transfers the loop also
 * exits when the Byte Done bit signals that the 8-byte window has been
 * transmitted.  Returns 0, -ETIMEDOUT, -ENXIO (device error) or
 * -EAGAIN (bus collision); error bits may combine with the timeout.
 */
static int sis630_transaction_wait(struct i2c_adapter *adap, int size)
{
	int temp, result = 0, timeout = 0;

	/* We will always wait for a fraction of a second! */
	do {
		msleep(1);
		temp = sis630_read(SMB_STS);
		/* check if the current block chunk was transmitted */
		if (size == SIS630_BLOCK_DATA && (temp & BYTE_DONE_STS))
			break;
	} while (!(temp & 0x0e) && (timeout++ < MAX_TIMEOUT));

	/* If the SMBus is still busy, we give up */
	if (timeout > MAX_TIMEOUT) {
		dev_dbg(&adap->dev, "SMBus Timeout!\n");
		result = -ETIMEDOUT;
	}

	if (temp & SMBERR_STS) {
		dev_dbg(&adap->dev, "Error: Failed bus transaction\n");
		result = -ENXIO;
	}

	if (temp & SMBCOL_STS) {
		dev_err(&adap->dev, "Bus collision!\n");
		result = -EAGAIN;
	}

	return result;
}
/*
 * Finish a transaction: acknowledge all status bits and undo the clock
 * change made in sis630_transaction_start() when high_clock forced 56KHz
 * but the saved pre-transaction clock was not already 56KHz.
 */
static void sis630_transaction_end(struct i2c_adapter *adap, u8 oldclock)
{
	/* clear all status "sticky" bits */
	sis630_write(SMB_STS, 0xFF);

	dev_dbg(&adap->dev,
		"SMB_CNT before clock restore 0x%02x\n", sis630_read(SMB_CNT));

	/*
	 * restore old Host Master Clock if high_clock is set
	 * and oldclock was not 56KHz
	 */
	if (high_clock && !(oldclock & SMBCLK_SEL))
		sis630_write(SMB_CNT, sis630_read(SMB_CNT) & ~SMBCLK_SEL);

	dev_dbg(&adap->dev,
		"SMB_CNT after clock restore 0x%02x\n", sis630_read(SMB_CNT));
}
/*
 * Run one complete non-block SMBus transaction: start it, wait for the
 * result, then restore the controller state.  Returns 0 or a negative
 * errno from the start/wait phases.
 */
static int sis630_transaction(struct i2c_adapter *adap, int size)
{
	u8 saved_clock = 0;
	int ret;

	ret = sis630_transaction_start(adap, size, &saved_clock);
	if (ret)
		return ret;

	ret = sis630_transaction_wait(adap, size);
	sis630_transaction_end(adap, saved_clock);
	return ret;
}
/*
 * SMBus block read/write.  The controller exposes only an 8-byte data
 * window (SMB_BYTE..SMB_BYTE+7), so transfers longer than 8 bytes are
 * carried out as a sequence of chunks: the transaction is started once,
 * then each full window is acknowledged by clearing BYTE_DONE_STS and
 * waiting again.  data->block[0] holds the byte count (clamped to 32,
 * the SMBus maximum).  Returns 0 or a negative errno.
 */
static int sis630_block_data(struct i2c_adapter *adap,
			     union i2c_smbus_data *data, int read_write)
{
	int i, len = 0, rc = 0;
	u8 oldclock = 0;

	if (read_write == I2C_SMBUS_WRITE) {
		/* clamp the caller-supplied length to the legal 0..32 range */
		len = data->block[0];
		if (len < 0)
			len = 0;
		else if (len > 32)
			len = 32;
		sis630_write(SMB_COUNT, len);
		for (i = 1; i <= len; i++) {
			dev_dbg(&adap->dev,
				"set data 0x%02x\n", data->block[i]);
			/* stage the byte in the 8-byte data window */
			sis630_write(SMB_BYTE + (i - 1) % 8, data->block[i]);
			if (i == 8 || (len < 8 && i == len)) {
				dev_dbg(&adap->dev,
					"start trans len=%d i=%d\n", len, i);
				/* first window is full (or whole message
				 * fits) -- start the transaction */
				rc = sis630_transaction_start(adap,
						SIS630_BLOCK_DATA, &oldclock);
				if (rc)
					return rc;
			} else if ((i - 1) % 8 == 7 || i == len) {
				dev_dbg(&adap->dev,
					"trans_wait len=%d i=%d\n", len, i);
				if (i > 8) {
					dev_dbg(&adap->dev,
						"clear smbary_sts"
						" len=%d i=%d\n", len, i);
					/*
					 * If this is not the first window,
					 * we must acknowledge the previous
					 * one by clearing the sticky
					 * Byte Done (SMBARY) status bit.
					 */
					sis630_write(SMB_STS, BYTE_DONE_STS);
				}
				rc = sis630_transaction_wait(adap,
						SIS630_BLOCK_DATA);
				if (rc) {
					dev_dbg(&adap->dev,
						"trans_wait failed\n");
					break;
				}
			}
		}
	} else {
		/* read request: length comes from the device */
		data->block[0] = len = 0;
		rc = sis630_transaction_start(adap,
				SIS630_BLOCK_DATA, &oldclock);
		if (rc)
			return rc;

		do {
			rc = sis630_transaction_wait(adap, SIS630_BLOCK_DATA);
			if (rc) {
				dev_dbg(&adap->dev, "trans_wait failed\n");
				break;
			}
			/* on the first pass, read the byte count from chip */
			if (len == 0)
				data->block[0] = sis630_read(SMB_COUNT);

			/* just to be sure: never exceed the SMBus maximum */
			if (data->block[0] > 32)
				data->block[0] = 32;

			dev_dbg(&adap->dev,
				"block data read len=0x%x\n", data->block[0]);

			/* drain up to 8 bytes from the data window */
			for (i = 0; i < 8 && len < data->block[0]; i++, len++) {
				dev_dbg(&adap->dev,
					"read i=%d len=%d\n", i, len);
				data->block[len + 1] = sis630_read(SMB_BYTE +
								   i);
			}

			dev_dbg(&adap->dev,
				"clear smbary_sts len=%d i=%d\n", len, i);

			/* acknowledge the window: clear Byte Done status */
			sis630_write(SMB_STS, BYTE_DONE_STS);
		} while (len < data->block[0]);
	}

	sis630_transaction_end(adap, oldclock);

	return rc;
}
/*
 * smbus_xfer callback: translate a generic SMBus request into SIS630
 * register pokes, run the transaction, and read back any result.
 * Return negative errno on error, 0 on success.
 */
static s32 sis630_access(struct i2c_adapter *adap, u16 addr,
			 unsigned short flags, char read_write,
			 u8 command, int size, union i2c_smbus_data *data)
{
	int status;

	/* program address/command/data registers per protocol, then map
	 * the generic size code onto the controller's SIS630_* code */
	switch (size) {
	case I2C_SMBUS_QUICK:
		sis630_write(SMB_ADDR,
			     ((addr & 0x7f) << 1) | (read_write & 0x01));
		size = SIS630_QUICK;
		break;
	case I2C_SMBUS_BYTE:
		sis630_write(SMB_ADDR,
			     ((addr & 0x7f) << 1) | (read_write & 0x01));
		if (read_write == I2C_SMBUS_WRITE)
			sis630_write(SMB_CMD, command);
		size = SIS630_BYTE;
		break;
	case I2C_SMBUS_BYTE_DATA:
		sis630_write(SMB_ADDR,
			     ((addr & 0x7f) << 1) | (read_write & 0x01));
		sis630_write(SMB_CMD, command);
		if (read_write == I2C_SMBUS_WRITE)
			sis630_write(SMB_BYTE, data->byte);
		size = SIS630_BYTE_DATA;
		break;
	case I2C_SMBUS_PROC_CALL:
	case I2C_SMBUS_WORD_DATA:
		sis630_write(SMB_ADDR,
			     ((addr & 0x7f) << 1) | (read_write & 0x01));
		sis630_write(SMB_CMD, command);
		if (read_write == I2C_SMBUS_WRITE) {
			/* little-endian word: low byte first */
			sis630_write(SMB_BYTE, data->word & 0xff);
			sis630_write(SMB_BYTE + 1, (data->word & 0xff00) >> 8);
		}
		size = (size == I2C_SMBUS_PROC_CALL ?
			SIS630_PCALL : SIS630_WORD_DATA);
		break;
	case I2C_SMBUS_BLOCK_DATA:
		sis630_write(SMB_ADDR,
			     ((addr & 0x7f) << 1) | (read_write & 0x01));
		sis630_write(SMB_CMD, command);
		size = SIS630_BLOCK_DATA;
		/* block transfers need chunked handling; done separately */
		return sis630_block_data(adap, data, read_write);
	default:
		dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
		return -EOPNOTSUPP;
	}

	status = sis630_transaction(adap, size);
	if (status)
		return status;

	/* nothing to read back for writes or quick commands,
	 * except for process calls which always return a word */
	if ((size != SIS630_PCALL) &&
	    ((read_write == I2C_SMBUS_WRITE) || (size == SIS630_QUICK))) {
		return 0;
	}

	switch (size) {
	case SIS630_BYTE:
	case SIS630_BYTE_DATA:
		data->byte = sis630_read(SMB_BYTE);
		break;
	case SIS630_PCALL:
	case SIS630_WORD_DATA:
		data->word = sis630_read(SMB_BYTE) +
			     (sis630_read(SMB_BYTE + 1) << 8);
		break;
	}

	return 0;
}
/* Report the SMBus protocols this controller implements. */
static u32 sis630_func(struct i2c_adapter *adapter)
{
	const u32 funcs = I2C_FUNC_SMBUS_QUICK
			| I2C_FUNC_SMBUS_BYTE
			| I2C_FUNC_SMBUS_BYTE_DATA
			| I2C_FUNC_SMBUS_WORD_DATA
			| I2C_FUNC_SMBUS_PROC_CALL
			| I2C_FUNC_SMBUS_BLOCK_DATA;

	return funcs;
}
/*
 * Detect a supported SiS bridge, enable ACPI I/O decoding on it, locate
 * the SMBus register block inside the ACPI I/O range and reserve it.
 * On success smbus_base is set and 0 is returned; on any failure
 * smbus_base is reset to 0 and a negative errno is returned.
 */
static int sis630_setup(struct pci_dev *sis630_dev)
{
	unsigned char b;
	struct pci_dev *dummy = NULL;
	int retval, i;
	/* acpi base address */
	unsigned short acpi_base;

	/* check for supported SiS devices */
	for (i = 0; supported[i] > 0; i++) {
		dummy = pci_get_device(PCI_VENDOR_ID_SI, supported[i], dummy);
		if (dummy)
			break; /* found */
	}

	if (dummy) {
		pci_dev_put(dummy);
	} else if (force) {
		dev_err(&sis630_dev->dev,
			"WARNING: Can't detect SIS630 compatible device, but "
			"loading because of force option enabled\n");
	} else {
		return -ENODEV;
	}

	/*
	 * Enable ACPI first, so we can access regs 74-75
	 * in ACPI I/O space and read the ACPI base address.
	 */
	if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, &b)) {
		dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
		retval = -ENODEV;
		goto exit;
	}

	/* if ACPI is already enabled (bit 7 set), do nothing */
	if (!(b & 0x80) &&
	    pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) {
		dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n");
		retval = -ENODEV;
		goto exit;
	}

	/* Determine the ACPI base address */
	if (pci_read_config_word(sis630_dev,
				 SIS630_ACPI_BASE_REG, &acpi_base)) {
		dev_err(&sis630_dev->dev,
			"Error: Can't determine ACPI base address\n");
		retval = -ENODEV;
		goto exit;
	}

	dev_dbg(&sis630_dev->dev, "ACPI base at 0x%04hx\n", acpi_base);

	/* the SMBus registers sit at a chip-dependent offset (see the
	 * table in the header comment): 0xE0 for SiS964, 0x80 otherwise */
	if (supported[i] == PCI_DEVICE_ID_SI_760)
		smbus_base = acpi_base + 0xE0;
	else
		smbus_base = acpi_base + 0x80;

	dev_dbg(&sis630_dev->dev, "SMBus base at 0x%04hx\n", smbus_base);

	retval = acpi_check_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION,
				   sis630_driver.name);
	if (retval)
		goto exit;

	/* Everything is happy, let's grab the memory and set things up. */
	if (!request_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION,
			    sis630_driver.name)) {
		dev_err(&sis630_dev->dev,
			"I/O Region 0x%04hx-0x%04hx for SMBus already in use.\n",
			smbus_base + SMB_STS,
			smbus_base + SMB_STS + SIS630_SMB_IOREGION - 1);
		retval = -EBUSY;
		goto exit;
	}

	retval = 0;

exit:
	if (retval)
		smbus_base = 0;
	return retval;
}
/* Bus algorithm: this controller does SMBus protocols only, no raw I2C. */
static const struct i2c_algorithm smbus_algorithm = {
	.smbus_xfer	= sis630_access,
	.functionality	= sis630_func,
};

/* Single adapter instance; .name and .dev.parent are filled in at probe */
static struct i2c_adapter sis630_adapter = {
	.owner		= THIS_MODULE,
	.class		= I2C_CLASS_HWMON | I2C_CLASS_SPD,
	.algo		= &smbus_algorithm,
	.retries	= 3
};
static DEFINE_PCI_DEVICE_TABLE(sis630_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_964) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, sis630_ids);
/*
 * PCI probe: set up the hardware, then register the single SMBus
 * adapter with its sysfs parent pointing at the PCI device.
 */
static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int err = sis630_setup(dev);

	if (err) {
		dev_err(&dev->dev,
			"SIS630 compatible bus not detected, "
			"module not inserted.\n");
		return -ENODEV;
	}

	/* set up the sysfs linkage to our parent device */
	sis630_adapter.dev.parent = &dev->dev;

	snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
		 "SMBus SIS630 adapter at %04hx", smbus_base + SMB_STS);

	return i2c_add_adapter(&sis630_adapter);
}
/*
 * PCI remove: tear down the adapter and release the I/O region.
 * A zero smbus_base means setup never completed, so there is nothing
 * to undo.
 */
static void sis630_remove(struct pci_dev *dev)
{
	if (!smbus_base)
		return;

	i2c_del_adapter(&sis630_adapter);
	release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION);
	smbus_base = 0;
}
/* PCI driver glue; module_pci_driver() generates the init/exit pair. */
static struct pci_driver sis630_driver = {
	.name		= "sis630_smbus",
	.id_table	= sis630_ids,
	.probe		= sis630_probe,
	.remove		= sis630_remove,
};

module_pci_driver(sis630_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Malysh <amalysh@web.de>");
MODULE_DESCRIPTION("SIS630 SMBus driver");
| gpl-2.0 |
AragaoAnderson/kitkat_kernel_hammerhead | arch/blackfin/mach-bf537/boards/stamp.c | 4418 | 78854 | /*
* Copyright 2004-2009 Analog Devices Inc.
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/plat-ram.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
#include <linux/i2c.h>
#include <linux/i2c/adp5588.h>
#include <linux/etherdevice.h>
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/sl811.h>
#include <linux/spi/mmc_spi.h>
#include <linux/leds.h>
#include <linux/input.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/reboot.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
#include <asm/bfin_sport.h>
#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE
#include <linux/regulator/fixed.h>
#endif
#include <linux/regulator/machine.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/userspace-consumer.h>
/*
 * Name the Board for the /proc/cpuinfo
 */
const char bfin_board_name[] = "ADI BF537-STAMP";

/*
 * ISP1760 USB host controller.
 * Driver needs to know address, irq and flag pin.
 */
#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
#include <linux/usb/isp1760.h>
/* 1MB memory window on the async bus plus a low-level IRQ on PF7 */
static struct resource bfin_isp1760_resources[] = {
	[0] = {
		.start  = 0x203C0000,
		.end    = 0x203C0000 + 0x000fffff,
		.flags  = IORESOURCE_MEM,
	},
	[1] = {
		.start  = IRQ_PF7,
		.end    = IRQ_PF7,
		.flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
	},
};

/* ISP1760 wired as plain 16-bit host controller, no OTG */
static struct isp1760_platform_data isp1760_priv = {
	.is_isp1761 = 0,
	.bus_width_16 = 1,
	.port1_otg = 0,
	.analog_oc = 0,
	.dack_polarity_high = 0,
	.dreq_polarity_high = 0,
};

static struct platform_device bfin_isp1760_device = {
	.name           = "isp1760",
	.id             = 0,
	.dev = {
		.platform_data = &isp1760_priv,
	},
	.num_resources  = ARRAY_SIZE(bfin_isp1760_resources),
	.resource       = bfin_isp1760_resources,
};
#endif
/* Four push buttons on PF2..PF5, exposed via the generic gpio-keys driver */
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/gpio_keys.h>

/* {keycode, gpio, active_low, description} */
static struct gpio_keys_button bfin_gpio_keys_table[] = {
	{BTN_0, GPIO_PF2, 1, "gpio-keys: BTN0"},
	{BTN_1, GPIO_PF3, 1, "gpio-keys: BTN1"},
	{BTN_2, GPIO_PF4, 1, "gpio-keys: BTN2"},
	{BTN_3, GPIO_PF5, 1, "gpio-keys: BTN3"},
};

static struct gpio_keys_platform_data bfin_gpio_keys_data = {
	.buttons        = bfin_gpio_keys_table,
	.nbuttons       = ARRAY_SIZE(bfin_gpio_keys_table),
};

static struct platform_device bfin_device_gpiokeys = {
	.name      = "gpio-keys",
	.dev = {
		.platform_data = &bfin_gpio_keys_data,
	},
};
#endif

/* CompactFlash/PCMCIA socket on the async memory bus */
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
static struct resource bfin_pcmcia_cf_resources[] = {
	{
		.start = 0x20310000, /* IO PORT */
		.end = 0x20312000,
		.flags = IORESOURCE_MEM,
	}, {
		.start = 0x20311000, /* Attribute Memory */
		.end = 0x20311FFF,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_PF4,
		.end = IRQ_PF4,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
	}, {
		.start = 6, /* Card Detect PF6 */
		.end = 6,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_pcmcia_cf_device = {
	.name = "bfin_cf_pcmcia",
	.id = -1,
	.num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources),
	.resource = bfin_pcmcia_cf_resources,
};
#endif
/* On-chip Blackfin real-time clock */
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
static struct platform_device rtc_device = {
	.name = "rtc-bfin",
	.id   = -1,
};
#endif

/* SMC91x 10/100 Ethernet controller on the async bus, IRQ on PF7 */
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
#include <linux/smc91x.h>

static struct smc91x_platdata smc91x_info = {
	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
	.leda = RPC_LED_100_10,
	.ledb = RPC_LED_TX_RX,
};

static struct resource smc91x_resources[] = {
	{
		.name = "smc91x-regs",
		.start = 0x20300300,
		.end = 0x20300300 + 16,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_PF7,
		.end = IRQ_PF7,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
	},
};
static struct platform_device smc91x_device = {
	.name = "smc91x",
	.id = 0,
	.num_resources = ARRAY_SIZE(smc91x_resources),
	.resource = smc91x_resources,
	.dev = {
		.platform_data  = &smc91x_info,
	},
};
#endif

/* DM9000 Ethernet controller: address port, data port and edge IRQ on PF9 */
#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
static struct resource dm9000_resources[] = {
	[0] = {
		.start  = 0x203FB800,
		.end    = 0x203FB800 + 1,
		.flags  = IORESOURCE_MEM,
	},
	[1] = {
		.start  = 0x203FB804,
		.end    = 0x203FB804 + 1,
		.flags  = IORESOURCE_MEM,
	},
	[2] = {
		.start  = IRQ_PF9,
		.end    = IRQ_PF9,
		.flags  = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE),
	},
};

static struct platform_device dm9000_device = {
	.name           = "dm9000",
	.id             = -1,
	.num_resources  = ARRAY_SIZE(dm9000_resources),
	.resource       = dm9000_resources,
};
#endif
/* SL811 USB host controller: two register ports plus IRQ on PF4 */
#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
static struct resource sl811_hcd_resources[] = {
	{
		.start = 0x20340000,
		.end = 0x20340000,
		.flags = IORESOURCE_MEM,
	}, {
		.start = 0x20340004,
		.end = 0x20340004,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_PF4,
		.end = IRQ_PF4,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
	},
};

#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
/* Drive port VBUS through a GPIO.
 * NOTE(review): gpio_request() is called on every invocation and its
 * return value is ignored -- repeated calls will fail after the first;
 * appears harmless here but worth confirming against the GPIO core. */
void sl811_port_power(struct device *dev, int is_on)
{
	gpio_request(CONFIG_USB_SL811_BFIN_GPIO_VBUS, "usb:SL811_VBUS");
	gpio_direction_output(CONFIG_USB_SL811_BFIN_GPIO_VBUS, is_on);
}
#endif

static struct sl811_platform_data sl811_priv = {
	.potpg = 10,
	.power = 250,       /* == 500mA */
#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
	.port_power = &sl811_port_power,
#endif
};

static struct platform_device sl811_hcd_device = {
	.name = "sl811-hcd",
	.id = 0,
	.dev = {
		.platform_data = &sl811_priv,
	},
	.num_resources = ARRAY_SIZE(sl811_hcd_resources),
	.resource = sl811_hcd_resources,
};
#endif

/* ISP1362 USB host controller: two register ports plus edge IRQ on PF3 */
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
static struct resource isp1362_hcd_resources[] = {
	{
		.start = 0x20360000,
		.end = 0x20360000,
		.flags = IORESOURCE_MEM,
	}, {
		.start = 0x20360004,
		.end = 0x20360004,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_PF3,
		.end = IRQ_PF3,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
	},
};

static struct isp1362_platform_data isp1362_priv = {
	.sel15Kres = 1,
	.clknotstop = 0,
	.oc_enable = 0,
	.int_act_high = 0,
	.int_edge_triggered = 0,
	.remote_wakeup_connected = 0,
	.no_power_switching = 1,
	.power_switching_mode = 0,
};

static struct platform_device isp1362_hcd_device = {
	.name = "isp1362-hcd",
	.id = 0,
	.dev = {
		.platform_data = &isp1362_priv,
	},
	.num_resources = ARRAY_SIZE(isp1362_hcd_resources),
	.resource = isp1362_hcd_resources,
};
#endif
/* On-chip CAN controller: pin mux list, MMR window and three IRQ lines */
#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
static unsigned short bfin_can_peripherals[] = {
	P_CAN0_RX, P_CAN0_TX, 0
};

static struct resource bfin_can_resources[] = {
	{
		.start = 0xFFC02A00,
		.end = 0xFFC02FFF,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_CAN_RX,
		.end = IRQ_CAN_RX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_CAN_TX,
		.end = IRQ_CAN_TX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_CAN_ERROR,
		.end = IRQ_CAN_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_can_device = {
	.name = "bfin_can",
	.num_resources = ARRAY_SIZE(bfin_can_resources),
	.resource = bfin_can_resources,
	.dev = {
		.platform_data = &bfin_can_peripherals, /* Passed to driver */
	},
};
#endif

/* On-chip Ethernet MAC with one MII PHY at address 1, polled (no PHY IRQ) */
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#include <linux/bfin_mac.h>
static const unsigned short bfin_mac_peripherals[] = P_MII0;

static struct bfin_phydev_platform_data bfin_phydev_data[] = {
	{
		.addr = 1,
		.irq = PHY_POLL, /* IRQ_MAC_PHYINT */
	},
};

static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
	.phydev_number = 1,
	.phydev_data = bfin_phydev_data,
	.phy_mode = PHY_INTERFACE_MODE_MII,
	.mac_peripherals = bfin_mac_peripherals,
};

static struct platform_device bfin_mii_bus = {
	.name = "bfin_mii_bus",
	.dev = {
		.platform_data = &bfin_mii_bus_data,
	}
};

/* The MAC device hangs off the MII bus device via platform_data */
static struct platform_device bfin_mac_device = {
	.name = "bfin_mac",
	.dev = {
		.platform_data = &bfin_mii_bus,
	}
};
#endif

/* NET2272 USB device controller on the async bus */
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
static struct resource net2272_bfin_resources[] = {
	{
		.start = 0x20300000,
		.end = 0x20300000 + 0x100,
		.flags = IORESOURCE_MEM,
	}, {
		.start = 1,
		.flags = IORESOURCE_BUS,
	}, {
		.start = IRQ_PF7,
		.end = IRQ_PF7,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
	},
};

static struct platform_device net2272_bfin_device = {
	.name = "net2272",
	.id = -1,
	.num_resources = ARRAY_SIZE(net2272_bfin_resources),
	.resource = net2272_bfin_resources,
};
#endif
/* Parallel NAND flash driven by the generic platform NAND driver:
 * CLE/ALE are address lines, ready/busy is sampled on a GPIO. */
#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };

static struct mtd_partition bfin_plat_nand_partitions[] = {
	{
		.name   = "linux kernel(nand)",
		.size   = 0x400000,
		.offset = 0,
	}, {
		.name   = "file system(nand)",
		.size   = MTDPART_SIZ_FULL,
		.offset = MTDPART_OFS_APPEND,
	},
};

/* address-line numbers used to toggle Command/Address Latch Enable */
#define BFIN_NAND_PLAT_CLE 2
#define BFIN_NAND_PLAT_ALE 1

/* Route a command byte to the CLE or ALE address of the chip */
static void bfin_plat_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct nand_chip *this = mtd->priv;

	if (cmd == NAND_CMD_NONE)
		return;

	if (ctrl & NAND_CLE)
		writeb(cmd, this->IO_ADDR_W + (1 << BFIN_NAND_PLAT_CLE));
	else
		writeb(cmd, this->IO_ADDR_W + (1 << BFIN_NAND_PLAT_ALE));
}

#define BFIN_NAND_PLAT_READY GPIO_PF3
/* Ready/busy: the chip pulls the PF3 GPIO high when ready */
static int bfin_plat_nand_dev_ready(struct mtd_info *mtd)
{
	return gpio_get_value(BFIN_NAND_PLAT_READY);
}

static struct platform_nand_data bfin_plat_nand_data = {
	.chip = {
		.nr_chips = 1,
		.chip_delay = 30,
		.part_probe_types = part_probes,
		.partitions = bfin_plat_nand_partitions,
		.nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions),
	},
	.ctrl = {
		.cmd_ctrl = bfin_plat_nand_cmd_ctrl,
		.dev_ready = bfin_plat_nand_dev_ready,
	},
};
/*
 * Larger of two values.  Arguments are fully parenthesized so that
 * expansions with lower-precedence operators (?:, &, |, shifts) parse
 * correctly; the original form broke for such arguments.  Usual
 * function-like-macro caveat applies: each argument may be evaluated
 * twice, so avoid side effects in x and y.
 */
#define MAX(x, y) ((x) > (y) ? (x) : (y))
/* Async-bus window sized to reach the highest of the CLE/ALE address lines */
static struct resource bfin_plat_nand_resources = {
	.start = 0x20212000,
	.end   = 0x20212000 + (1 << MAX(BFIN_NAND_PLAT_CLE, BFIN_NAND_PLAT_ALE)),
	.flags = IORESOURCE_MEM,
};

static struct platform_device bfin_async_nand_device = {
	.name = "gen_nand",
	.id = -1,
	.num_resources = 1,
	.resource = &bfin_plat_nand_resources,
	.dev = {
		.platform_data = &bfin_plat_nand_data,
	},
};

/* Claim the ready/busy GPIO before the NAND driver starts polling it */
static void bfin_plat_nand_init(void)
{
	gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat");
}
#else
static void bfin_plat_nand_init(void) {}
#endif
/* Parallel NOR flash (physmap), 16-bit wide, 4MB at the start of the
 * async bus; the MAC address sector is kept read-only. */
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static struct mtd_partition stamp_partitions[] = {
	{
		.name   = "bootloader(nor)",
		.size   = 0x40000,
		.offset = 0,
	}, {
		.name   = "linux kernel(nor)",
		.size   = 0x180000,
		.offset = MTDPART_OFS_APPEND,
	}, {
		.name   = "file system(nor)",
		.size   = 0x400000 - 0x40000 - 0x180000 - 0x10000,
		.offset = MTDPART_OFS_APPEND,
	}, {
		.name   = "MAC Address(nor)",
		.size   = MTDPART_SIZ_FULL,
		.offset = 0x3F0000,
		.mask_flags = MTD_WRITEABLE,
	}
};

static struct physmap_flash_data stamp_flash_data = {
	.width      = 2,
	.parts      = stamp_partitions,
	.nr_parts   = ARRAY_SIZE(stamp_partitions),
#ifdef CONFIG_ROMKERNEL
	.probe_type = "map_rom",
#endif
};

static struct resource stamp_flash_resource = {
	.start = 0x20000000,
	.end   = 0x203fffff,
	.flags = IORESOURCE_MEM,
};

static struct platform_device stamp_flash_device = {
	.name          = "physmap-flash",
	.id            = 0,
	.dev = {
		.platform_data = &stamp_flash_data,
	},
	.num_resources = 1,
	.resource      = &stamp_flash_resource,
};
#endif

/* Serial (SPI) flash handled by the m25p80 driver */
#if defined(CONFIG_MTD_M25P80) \
	|| defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
	{
		.name = "bootloader(spi)",
		.size = 0x00040000,
		.offset = 0,
		.mask_flags = MTD_CAP_ROM
	}, {
		.name = "linux kernel(spi)",
		.size = 0x180000,
		.offset = MTDPART_OFS_APPEND,
	}, {
		.name = "file system(spi)",
		.size = MTDPART_SIZ_FULL,
		.offset = MTDPART_OFS_APPEND,
	}
};

static struct flash_platform_data bfin_spi_flash_data = {
	.name = "m25p80",
	.parts = bfin_spi_flash_partitions,
	.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
	/* .type = "m25p64", */
};

/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
	.enable_dma = 0,         /* use dma transfer with this chip*/
};
#endif
/* AD7147 capacitance touch sensor on SPI: one slider plus five buttons.
 * The stage_cfg_reg / sys_cfg_reg tables are raw chip register values
 * tuned for this board -- see the AD7147 datasheet for field meanings. */
#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE)
#include <linux/input/ad714x.h>

static struct ad714x_slider_plat ad7147_spi_slider_plat[] = {
	{
		.start_stage = 0,
		.end_stage = 7,
		.max_coord = 128,
	},
};

static struct ad714x_button_plat ad7147_spi_button_plat[] = {
	{
		.keycode = BTN_FORWARD,
		.l_mask = 0,
		.h_mask = 0x600,
	},
	{
		.keycode = BTN_LEFT,
		.l_mask = 0,
		.h_mask = 0x500,
	},
	{
		.keycode = BTN_MIDDLE,
		.l_mask = 0,
		.h_mask = 0x800,
	},
	{
		.keycode = BTN_RIGHT,
		.l_mask = 0x100,
		.h_mask = 0x400,
	},
	{
		.keycode = BTN_BACK,
		.l_mask = 0x200,
		.h_mask = 0x400,
	},
};
static struct ad714x_platform_data ad7147_spi_platform_data = {
	.slider_num = 1,
	.button_num = 5,
	.slider = ad7147_spi_slider_plat,
	.button = ad7147_spi_button_plat,
	.stage_cfg_reg =  {
		{0xFBFF, 0x1FFF, 0, 0x2626, 1600, 1600, 1600, 1600},
		{0xEFFF, 0x1FFF, 0, 0x2626, 1650, 1650, 1650, 1650},
		{0xFFFF, 0x1FFE, 0, 0x2626, 1650, 1650, 1650, 1650},
		{0xFFFF, 0x1FFB, 0, 0x2626, 1650, 1650, 1650, 1650},
		{0xFFFF, 0x1FEF, 0, 0x2626, 1650, 1650, 1650, 1650},
		{0xFFFF, 0x1FBF, 0, 0x2626, 1650, 1650, 1650, 1650},
		{0xFFFF, 0x1EFF, 0, 0x2626, 1650, 1650, 1650, 1650},
		{0xFFFF, 0x1BFF, 0, 0x2626, 1600, 1600, 1600, 1600},
		{0xFF7B, 0x3FFF, 0x506,  0x2626, 1100, 1100, 1150, 1150},
		{0xFDFE, 0x3FFF, 0x606,  0x2626, 1100, 1100, 1150, 1150},
		{0xFEBA, 0x1FFF, 0x1400, 0x2626, 1200, 1200, 1300, 1300},
		{0xFFEF, 0x1FFF, 0x0,    0x2626, 1100, 1100, 1150, 1150},
	},
	.sys_cfg_reg = {0x2B2, 0x0, 0x3233, 0x819, 0x832, 0xCFF, 0xCFF, 0x0},
};
#endif

/* AD7142 capacitance touch sensor on I2C: four buttons, no slider */
#if defined(CONFIG_INPUT_AD714X_I2C) || defined(CONFIG_INPUT_AD714X_I2C_MODULE)
#include <linux/input/ad714x.h>
static struct ad714x_button_plat ad7142_i2c_button_plat[] = {
	{
		.keycode = BTN_1,
		.l_mask = 0,
		.h_mask = 0x1,
	},
	{
		.keycode = BTN_2,
		.l_mask = 0,
		.h_mask = 0x2,
	},
	{
		.keycode = BTN_3,
		.l_mask = 0,
		.h_mask = 0x4,
	},
	{
		.keycode = BTN_4,
		.l_mask = 0x0,
		.h_mask = 0x8,
	},
};
static struct ad714x_platform_data ad7142_i2c_platform_data = {
	.button_num = 4,
	.button = ad7142_i2c_button_plat,
	.stage_cfg_reg =  {
		/* fixme: figure out the right setting for all components
		 * according to the hardware of the EVAL-AD7142EB board */
		{0xE7FF, 0x3FFF, 0x0005, 0x2626, 0x01F4, 0x01F4, 0x028A, 0x028A},
		{0xFDBF, 0x3FFF, 0x0001, 0x2626, 0x01F4, 0x01F4, 0x028A, 0x028A},
		{0xFFFF, 0x2DFF, 0x0001, 0x2626, 0x01F4, 0x01F4, 0x028A, 0x028A},
		{0xFFFF, 0x37BF, 0x0001, 0x2626, 0x01F4, 0x01F4, 0x028A, 0x028A},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
		{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
	},
	.sys_cfg_reg = {0x0B2, 0x0, 0x690, 0x664, 0x290F, 0xF, 0xF, 0x0},
};
#endif
/* Resolver-to-digital and temperature-sensor SPI chips: per-chip SPI
 * settings and the GPIO/IRQ lists their staging drivers expect as
 * platform data. */
#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE)
static struct bfin5xx_spi_chip ad2s90_spi_chip_info = {
	.enable_dma = 0,
};
#endif

#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE)
static unsigned short ad2s120x_platform_data[] = {
	/* used as SAMPLE and RDVEL */
	GPIO_PF5, GPIO_PF6, 0
};

static struct bfin5xx_spi_chip ad2s120x_spi_chip_info = {
	.enable_dma = 0,
};
#endif

#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE)
static unsigned short ad2s1210_platform_data[] = {
	/* use as SAMPLE, A0, A1 */
	GPIO_PF7, GPIO_PF8, GPIO_PF9,
# if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT)
	/* the RES0 and RES1 pins */
	GPIO_PF4, GPIO_PF5,
# endif
	0,
};

static struct bfin5xx_spi_chip ad2s1210_spi_chip_info = {
	.enable_dma = 0,
};
#endif

#if defined(CONFIG_AD7314) || defined(CONFIG_AD7314_MODULE)
static struct bfin5xx_spi_chip ad7314_spi_chip_info = {
	.enable_dma = 0,
};
#endif

#if defined(CONFIG_AD7816) || defined(CONFIG_AD7816_MODULE)
static unsigned short ad7816_platform_data[] = {
	GPIO_PF4, /* rdwr_pin */
	GPIO_PF5, /* convert_pin */
	GPIO_PF7, /* busy_pin */
	0,
};

static struct bfin5xx_spi_chip ad7816_spi_chip_info = {
	.enable_dma = 0,
};
#endif

#if defined(CONFIG_ADT7310) || defined(CONFIG_ADT7310_MODULE)
static unsigned long adt7310_platform_data[3] = {
	/* INT bound temperature alarm event. line 1 */
	IRQ_PG4, IRQF_TRIGGER_LOW,
	/* CT bound temperature alarm event irq_flags. line 0 */
	IRQF_TRIGGER_LOW,
};

static struct bfin5xx_spi_chip adt7310_spi_chip_info = {
	.enable_dma = 0,
};
#endif

#if defined(CONFIG_AD7298) || defined(CONFIG_AD7298_MODULE)
static unsigned short ad7298_platform_data[] = {
	GPIO_PF7, /* busy_pin */
	0,
};
#endif

#if defined(CONFIG_ADT7316_SPI) || defined(CONFIG_ADT7316_SPI_MODULE)
static unsigned long adt7316_spi_data[2] = {
	IRQF_TRIGGER_LOW, /* interrupt flags */
	GPIO_PF7, /* ldac_pin, 0 means DAC/LDAC registers control DAC update */
};

static struct bfin5xx_spi_chip adt7316_spi_chip_info = {
	.enable_dma = 0,
};
#endif
/* SD/MMC card over SPI with a card-detect interrupt on PF5 */
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
#define MMC_SPI_CARD_DETECT_INT IRQ_PF5

/* Hook the mmc_spi core's card-detect handler onto the PF5 IRQ */
static int bfin_mmc_spi_init(struct device *dev,
	irqreturn_t (*detect_int)(int, void *), void *data)
{
	return request_irq(MMC_SPI_CARD_DETECT_INT, detect_int,
		IRQF_TRIGGER_FALLING, "mmc-spi-detect", data);
}

/* Release the card-detect IRQ on teardown */
static void bfin_mmc_spi_exit(struct device *dev, void *data)
{
	free_irq(MMC_SPI_CARD_DETECT_INT, data);
}

static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
	.init = bfin_mmc_spi_init,
	.exit = bfin_mmc_spi_exit,
	.detect_delay = 100, /* msecs */
};

static struct bfin5xx_spi_chip mmc_spi_chip_info = {
	.enable_dma = 0,
	.pio_interrupt = 0,
};
#endif
/* AD7877 resistive touchscreen controller: board-tuned conversion and
 * plate-resistance parameters */
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
#include <linux/spi/ad7877.h>
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
	.model			= 7877,
	.vref_delay_usecs	= 50,	/* internal, no capacitor */
	.x_plate_ohms		= 419,
	.y_plate_ohms		= 486,
	.pressure_max		= 1000,
	.pressure_min		= 0,
	.stopacq_polarity 	= 1,
	.first_conversion_delay = 3,
	.acquisition_time 	= 1,
	.averaging 		= 1,
	.pen_down_acc_interval 	= 1,
};
#endif

/* AD7879 touchscreen controller; its auxiliary GPIO is exported to gpiolib */
#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
#include <linux/spi/ad7879.h>
static const struct ad7879_platform_data bfin_ad7879_ts_info = {
	.model			= 7879,	/* Model = AD7879 */
	.x_plate_ohms		= 620,	/* 620 Ohm from the touch datasheet */
	.pressure_max		= 10000,
	.pressure_min		= 0,
	.first_conversion_delay = 3,	/* wait 512us before do a first conversion */
	.acquisition_time 	= 1,	/* 4us acquisition time per sample */
	.median			= 2,	/* do 8 measurements */
	.averaging 		= 1,	/* take the average of 4 middle samples */
	.pen_down_acc_interval 	= 255,	/* 9.4 ms */
	.gpio_export		= 1,	/* Export GPIO to gpiolib */
	.gpio_base		= -1,	/* Dynamic allocation */
};
#endif

/* ADXL345/346 three-axis accelerometer: tap/free-fall/orientation setup */
#if defined(CONFIG_INPUT_ADXL34X) || defined(CONFIG_INPUT_ADXL34X_MODULE)
#include <linux/input/adxl34x.h>
static const struct adxl34x_platform_data adxl34x_info = {
	.x_axis_offset = 0,
	.y_axis_offset = 0,
	.z_axis_offset = 0,
	.tap_threshold = 0x31,
	.tap_duration = 0x10,
	.tap_latency = 0x60,
	.tap_window = 0xF0,
	.tap_axis_control = ADXL_TAP_X_EN | ADXL_TAP_Y_EN | ADXL_TAP_Z_EN,
	.act_axis_control = 0xFF,
	.activity_threshold = 5,
	.inactivity_threshold = 3,
	.inactivity_time = 4,
	.free_fall_threshold = 0x7,
	.free_fall_time = 0x20,
	.data_rate = 0x8,
	.data_range = ADXL_FULL_RES,

	.ev_type = EV_ABS,
	.ev_code_x = ABS_X,	/* EV_REL */
	.ev_code_y = ABS_Y,	/* EV_REL */
	.ev_code_z = ABS_Z,	/* EV_REL */

	.ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY x,y,z */

/*	.ev_code_ff = KEY_F,*/		/* EV_KEY */
/*	.ev_code_act_inactivity = KEY_A,*/	/* EV_KEY */
	.power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
	.fifo_mode = ADXL_FIFO_STREAM,
	.orientation_enable = ADXL_EN_ORIENTATION_3D,
	.deadzone_angle = ADXL_DEADZONE_ANGLE_10p8,
	.divisor_length = ADXL_LP_FILTER_DIVISOR_16,
	/* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */
	.ev_codes_orient_3d = {BTN_Z, BTN_Y, BTN_X, BTN_A, BTN_B, BTN_C},
};
#endif
#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE)
/* ENC28J60 Ethernet controller: per-chip SPI settings (DMA transfers enabled). */
static struct bfin5xx_spi_chip enc28j60_spi_chip_info = {
.enable_dma = 1,
};
#endif
#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE)
#include <linux/spi/adf702x.h>
#define TXREG 0x0160A470
/*
 * ADF7021 transceiver register initialization values, one word per
 * on-chip register (register index encoded in the low nibble).
 * NOTE(review): values look board/RF-plan specific — verify against the
 * ADF7021 datasheet before changing.
 */
static const u32 adf7021_regs[] = {
0x09608FA0,
0x00575011,
0x00A7F092,
0x2B141563,
0x81F29E94,
0x00003155,
0x050A4F66,
0x00000007,
0x00000008,
0x000231E9,
0x3296354A,
0x891A2B3B,
0x00000D9C,
0x0000000D,
0x0000000E,
0x0000000F,
};
/* ADF702x radio glue: driven over SPORT1 (register base, DMA channels, IRQ). */
static struct adf702x_platform_data adf7021_platform_data = {
.regs_base = (void *)SPORT1_TCR1,
.dma_ch_rx = CH_SPORT1_RX,
.dma_ch_tx = CH_SPORT1_TX,
.irq_sport_err = IRQ_SPORT1_ERROR,
.gpio_int_rfs = GPIO_PF8,
.pin_req = {P_SPORT1_DTPRI, P_SPORT1_RFS, P_SPORT1_DRPRI,
P_SPORT1_RSCLK, P_SPORT1_TSCLK, 0},
.adf702x_model = MODEL_ADF7021,
.adf702x_regs = adf7021_regs,
.tx_reg = TXREG,
};
/* Fill the ADF702x platform data with a randomly generated MAC address. */
static inline void adf702x_mac_init(void)
{
random_ether_addr(adf7021_platform_data.mac_addr);
}
#else
/* No ADF702x configured: nothing to initialize. */
static inline void adf702x_mac_init(void) {}
#endif
#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
#include <linux/spi/ads7846.h>
/* Pen-down detect for the AD7873: reads the PENIRQ line on GPIO PF6. */
static int ads7873_get_pendown_state(void)
{
return gpio_get_value(GPIO_PF6);
}
/* AD7873 touchscreen (ads7846-compatible) platform data. */
static struct ads7846_platform_data __initdata ad7873_pdata = {
.model = 7873, /* AD7873 */
.x_max = 0xfff,
.y_max = 0xfff,
.x_plate_ohms = 620,
.debounce_max = 1,
.debounce_rep = 0,
.debounce_tol = (~0), /* effectively disables the tolerance check */
.get_pendown_state = ads7873_get_pendown_state,
};
#endif
#if defined(CONFIG_MTD_DATAFLASH) \
|| defined(CONFIG_MTD_DATAFLASH_MODULE)
/* MTD layout for the SPI DataFlash: bootloader / kernel / rootfs. */
static struct mtd_partition bfin_spi_dataflash_partitions[] = {
{
.name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM /* bootloader partition is read-only */
}, {
.name = "linux kernel(spi)",
.size = 0x180000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "file system(spi)",
.size = MTDPART_SIZ_FULL, /* rest of the flash */
.offset = MTDPART_OFS_APPEND,
}
};
/* Ties the partition table above to the mtd_dataflash driver. */
static struct flash_platform_data bfin_spi_dataflash_data = {
.name = "SPI Dataflash",
.parts = bfin_spi_dataflash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_dataflash_partitions),
};
/* DataFlash chip */
static struct bfin5xx_spi_chip data_flash_chip_info = {
.enable_dma = 0, /* PIO mode: DMA transfers disabled for this chip */
};
#endif
#if defined(CONFIG_AD7476) || defined(CONFIG_AD7476_MODULE)
/* AD7476 ADC: per-chip SPI settings (PIO mode, DMA disabled). */
static struct bfin5xx_spi_chip spi_ad7476_chip_info = {
.enable_dma = 0, /* PIO mode: DMA transfers disabled for this chip */
};
#endif
/*
 * Devices registered on SPI bus 0 at boot.  Chip selects above MAX_CTRL_CS
 * are GPIO-controlled SSELs.  Several entries share chip selects (1, 4, 5)
 * and GPIO_PF10 — presumably the guarding Kconfig options are mutually
 * exclusive on any one board build; verify before enabling combinations.
 */
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
{
/* the modalias must be the same as spi device driver name */
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_MTD_DATAFLASH) \
|| defined(CONFIG_MTD_DATAFLASH_MODULE)
{ /* DataFlash chip */
.modalias = "mtd_dataflash",
.max_speed_hz = 33250000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
.platform_data = &bfin_spi_dataflash_data,
.controller_data = &data_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD1836) \
|| defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
{
.modalias = "ad1836",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
.platform_data = "ad1836", /* only includes chip name for the moment */
.mode = SPI_MODE_3,
},
#endif
#ifdef CONFIG_SND_SOC_AD193X_SPI
{
.modalias = "ad193x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_SND_SOC_ADAV80X) || defined(CONFIG_SND_SOC_ADV80X_MODULE)
{
.modalias = "adav801",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE)
{
.modalias = "ad714x_captouch",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.irq = IRQ_PF4,
.bus_num = 0,
.chip_select = 5,
.mode = SPI_MODE_3,
.platform_data = &ad7147_spi_platform_data,
},
#endif
#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE)
{
.modalias = "ad2s90",
.bus_num = 0,
.chip_select = 3, /* change it for your board */
.mode = SPI_MODE_3,
.platform_data = NULL,
.controller_data = &ad2s90_spi_chip_info,
},
#endif
#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE)
{
.modalias = "ad2s120x",
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = ad2s120x_platform_data,
.controller_data = &ad2s120x_spi_chip_info,
},
#endif
#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE)
{
.modalias = "ad2s1210",
.max_speed_hz = 8192000,
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = ad2s1210_platform_data,
.controller_data = &ad2s1210_spi_chip_info,
},
#endif
#if defined(CONFIG_AD7314) || defined(CONFIG_AD7314_MODULE)
{
.modalias = "ad7314",
.max_speed_hz = 1000000,
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.controller_data = &ad7314_spi_chip_info,
.mode = SPI_MODE_1,
},
#endif
#if defined(CONFIG_AD7816) || defined(CONFIG_AD7816_MODULE)
{
.modalias = "ad7818", /* NOTE(review): "ad7818" under CONFIG_AD7816 — confirm intended */
.max_speed_hz = 1000000,
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = ad7816_platform_data,
.controller_data = &ad7816_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADT7310) || defined(CONFIG_ADT7310_MODULE)
{
.modalias = "adt7310",
.max_speed_hz = 1000000,
.irq = IRQ_PG5, /* CT alarm event. Line 0 */
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = adt7310_platform_data,
.controller_data = &adt7310_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_AD7298) || defined(CONFIG_AD7298_MODULE)
{
.modalias = "ad7298",
.max_speed_hz = 1000000,
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = ad7298_platform_data,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADT7316_SPI) || defined(CONFIG_ADT7316_SPI_MODULE)
{
.modalias = "adt7316",
.max_speed_hz = 1000000,
.irq = IRQ_PG5, /* interrupt line */
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = adt7316_spi_data,
.controller_data = &adt7316_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
.platform_data = &bfin_mmc_spi_pdata,
.controller_data = &mmc_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
{
.modalias = "ad7877",
.platform_data = &bfin_ad7877_ts_info,
.irq = IRQ_PF6,
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
{
.modalias = "ad7879",
.platform_data = &bfin_ad7879_ts_info,
.irq = IRQ_PF7,
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
{
.modalias = "spidev",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
{
.modalias = "bfin-lq035q1-spi",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE)
{
.modalias = "enc28j60",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.irq = IRQ_PF6,
.bus_num = 0,
.chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */
.controller_data = &enc28j60_spi_chip_info,
.mode = SPI_MODE_0,
},
#endif
#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
{
.modalias = "adxl34x",
.platform_data = &adxl34x_info,
.irq = IRQ_PF6,
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE)
{
.modalias = "adf702x",
.max_speed_hz = 16000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */
.platform_data = &adf7021_platform_data,
.mode = SPI_MODE_0,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
{
.modalias = "ads7846",
.max_speed_hz = 2000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.irq = IRQ_PF6,
.chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */
.platform_data = &ad7873_pdata,
.mode = SPI_MODE_0,
},
#endif
#if defined(CONFIG_AD7476) \
|| defined(CONFIG_AD7476_MODULE)
{
.modalias = "ad7476", /* Name of spi_driver for this device */
.max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. */
.platform_data = NULL, /* No spi_driver specific config */
.controller_data = &spi_ad7476_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADE7753) \
|| defined(CONFIG_ADE7753_MODULE)
{
.modalias = "ade7753",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_1,
},
#endif
#if defined(CONFIG_ADE7754) \
|| defined(CONFIG_ADE7754_MODULE)
{
.modalias = "ade7754",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_1,
},
#endif
#if defined(CONFIG_ADE7758) \
|| defined(CONFIG_ADE7758_MODULE)
{
.modalias = "ade7758",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_1,
},
#endif
#if defined(CONFIG_ADE7759) \
|| defined(CONFIG_ADE7759_MODULE)
{
.modalias = "ade7759",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_1,
},
#endif
#if defined(CONFIG_ADE7854_SPI) \
|| defined(CONFIG_ADE7854_SPI_MODULE)
{
.modalias = "ade7854",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADIS16060) \
|| defined(CONFIG_ADIS16060_MODULE)
{
.modalias = "adis16060_r",
.max_speed_hz = 2900000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = MAX_CTRL_CS + 1, /* CS for read, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_0,
},
{
.modalias = "adis16060_w",
.max_speed_hz = 2900000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2, /* CS for write, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_1,
},
#endif
#if defined(CONFIG_ADIS16130) \
|| defined(CONFIG_ADIS16130_MODULE)
{
.modalias = "adis16130",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADIS16201) \
|| defined(CONFIG_ADIS16201_MODULE)
{
.modalias = "adis16201",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16203) \
|| defined(CONFIG_ADIS16203_MODULE)
{
.modalias = "adis16203",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16204) \
|| defined(CONFIG_ADIS16204_MODULE)
{
.modalias = "adis16204",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16209) \
|| defined(CONFIG_ADIS16209_MODULE)
{
.modalias = "adis16209",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16220) \
|| defined(CONFIG_ADIS16220_MODULE)
{
.modalias = "adis16220",
.max_speed_hz = 2000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16240) \
|| defined(CONFIG_ADIS16240_MODULE)
{
.modalias = "adis16240",
.max_speed_hz = 1500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16260) \
|| defined(CONFIG_ADIS16260_MODULE)
{
.modalias = "adis16260",
.max_speed_hz = 1500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16261) \
|| defined(CONFIG_ADIS16261_MODULE)
{
.modalias = "adis16261",
.max_speed_hz = 2500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADIS16300) \
|| defined(CONFIG_ADIS16300_MODULE)
{
.modalias = "adis16300",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16350) \
|| defined(CONFIG_ADIS16350_MODULE)
{
.modalias = "adis16364", /* NOTE(review): "adis16364" under CONFIG_ADIS16350 — confirm intended */
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16400) \
|| defined(CONFIG_ADIS16400_MODULE)
{
.modalias = "adis16400",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
},
#endif
};
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI controller data */
/* SPI0 master config: hardware CS lines plus all GPIOs usable as SSEL. */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};
/* SPI (0) */
/* SPI0 controller resources: register window, DMA channel, IRQ. */
static struct resource bfin_spi0_resource[] = {
[0] = {
.start = SPI0_REGBASE,
.end = SPI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI,
.end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
/* Platform device binding the bfin-spi driver to SPI bus 0. */
static struct platform_device bfin_spi0_device = {
.name = "bfin-spi",
.id = 0, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi0_resource),
.resource = bfin_spi0_resource,
.dev = {
.platform_data = &bfin_spi0_info, /* Passed to driver */
},
};
#endif /* spi master and devices */
#if defined(CONFIG_SPI_BFIN_SPORT) || defined(CONFIG_SPI_BFIN_SPORT_MODULE)
/* SPORT SPI controller data */
/* SPORT0-as-SPI master config (GPIO chip selects only). */
static struct bfin5xx_spi_master bfin_sport_spi0_info = {
.num_chipselect = MAX_BLACKFIN_GPIOS,
.enable_dma = 0, /* this master does not support DMA */
.pin_req = {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_DRPRI,
P_SPORT0_RSCLK, P_SPORT0_TFS, P_SPORT0_RFS, 0},
};
/* SPORT0 SPI resources: SPORT register window and error IRQ. */
static struct resource bfin_sport_spi0_resource[] = {
[0] = {
.start = SPORT0_TCR1,
.end = SPORT0_TCR1 + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
/* SPORT0 SPI platform device; id 1 = SPI bus number (bus 0 is the real SPI). */
static struct platform_device bfin_sport_spi0_device = {
.name = "bfin-sport-spi",
.id = 1, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_sport_spi0_resource),
.resource = bfin_sport_spi0_resource,
.dev = {
.platform_data = &bfin_sport_spi0_info, /* Passed to driver */
},
};
/* SPORT1-as-SPI master config (GPIO chip selects only). */
static struct bfin5xx_spi_master bfin_sport_spi1_info = {
.num_chipselect = MAX_BLACKFIN_GPIOS,
.enable_dma = 0, /* this master does not support DMA */
.pin_req = {P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_DRPRI,
P_SPORT1_RSCLK, P_SPORT1_TFS, P_SPORT1_RFS, 0},
};
/* SPORT1 SPI resources: SPORT register window and error IRQ. */
static struct resource bfin_sport_spi1_resource[] = {
[0] = {
.start = SPORT1_TCR1,
.end = SPORT1_TCR1 + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
/* SPORT1 SPI platform device; id 2 = SPI bus number. */
static struct platform_device bfin_sport_spi1_device = {
.name = "bfin-sport-spi",
.id = 2, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_sport_spi1_resource),
.resource = bfin_sport_spi1_resource,
.dev = {
.platform_data = &bfin_sport_spi1_info, /* Passed to driver */
},
};
#endif /* sport spi master and devices */
#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
/* Sharp LQ035 framebuffer device (driver needs no resources/platform data). */
static struct platform_device bfin_fb_device = {
.name = "bf537-lq035",
};
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
#include <asm/bfin-lq035q1.h>
/* LQ035Q1 display panel configuration (RGB565 over 16-bit PPI). */
static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
.mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB,
.ppi_mode = USE_RGB565_16_BIT_PPI,
.use_bl = 0, /* let something else control the LCD backlight */
.gpio_bl = GPIO_PF7,
};
/* LQ035Q1 framebuffer resources: PPI error interrupt only. */
static struct resource bfin_lq035q1_resources[] = {
{
.start = IRQ_PPI_ERROR,
.end = IRQ_PPI_ERROR,
.flags = IORESOURCE_IRQ,
},
};
/* LQ035Q1 framebuffer platform device. */
static struct platform_device bfin_lq035q1_device = {
.name = "bfin-lq035q1",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_lq035q1_resources),
.resource = bfin_lq035q1_resources,
.dev = {
.platform_data = &bfin_lq035q1_data,
},
};
#endif
#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
|| defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
#include <linux/videodev2.h>
#include <media/blackfin/bfin_capture.h>
#include <media/blackfin/ppi.h>
/* PPI pin-mux request list (zero-terminated): 8 data lines, clock, FS1/FS2. */
static const unsigned short ppi_req[] = {
P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
0,
};
/* PPI peripheral description used by the video capture driver. */
static const struct ppi_info ppi_info = {
.type = PPI_TYPE_PPI,
.dma_ch = CH_PPI,
.irq_err = IRQ_PPI_ERROR,
.base = (void __iomem *)PPI_CONTROL,
.pin_req = ppi_req,
};
#if defined(CONFIG_VIDEO_VS6624) \
|| defined(CONFIG_VIDEO_VS6624_MODULE)
/* V4L2 input list for the VS6624 sensor: a single camera input. */
static struct v4l2_input vs6624_inputs[] = {
{
.index = 0,
.name = "Camera",
.type = V4L2_INPUT_TYPE_CAMERA,
.std = V4L2_STD_UNKNOWN,
},
};
/* Capture routing table: input 0 maps straight to output 0. */
static struct bcap_route vs6624_routes[] = {
{
.input = 0,
.output = 0,
},
};
/* GPIO driving the VS6624 chip-enable line; passed as the sensor's pdata. */
static const unsigned vs6624_ce_pin = GPIO_PF10;
/* Blackfin video capture bridge config: VS6624 sensor on I2C bus 0 @ 0x10. */
static struct bfin_capture_config bfin_capture_data = {
.card_name = "BF537",
.inputs = vs6624_inputs,
.num_inputs = ARRAY_SIZE(vs6624_inputs),
.routes = vs6624_routes,
.i2c_adapter_id = 0,
.board_info = {
.type = "vs6624",
.addr = 0x10,
.platform_data = (void *)&vs6624_ce_pin,
},
.ppi_info = &ppi_info,
.ppi_control = (PACK_EN | DLEN_8 | XFR_TYPE | 0x0020),
};
#endif
/* Video capture platform device. */
static struct platform_device bfin_capture_device = {
.name = "bfin_capture",
.dev = {
.platform_data = &bfin_capture_data,
},
};
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
/* UART0 resources: MMIO window, TX/RX/error IRQs, TX/RX DMA channels,
 * plus optional CTS/RTS GPIOs when hardware flow control is enabled. */
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_THR,
.end = UART0_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_TX,
.end = IRQ_UART0_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART0_CTSRTS
{ /* CTS pin */
.start = GPIO_PG7,
.end = GPIO_PG7,
.flags = IORESOURCE_IO,
},
{ /* RTS pin */
.start = GPIO_PG6,
.end = GPIO_PG6,
.flags = IORESOURCE_IO,
},
#endif
};
/* UART0 pin-mux request list (zero-terminated). */
static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
/* UART0 platform device. */
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
/* UART1 resources: MMIO window, TX/RX/error IRQs, TX/RX DMA channels. */
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_THR,
.end = UART1_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_TX,
.end = IRQ_UART1_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
};
/* UART1 pin-mux request list (zero-terminated). */
static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
/* UART1 platform device. */
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
/* IrDA SIR over UART0: register window, RX IRQ pair, RX DMA channel pair.
 * NOTE(review): the +1 ranges appear to cover the adjacent TX IRQ/channel
 * numbers as well — confirm against the SIR driver's resource parsing. */
static struct resource bfin_sir0_resources[] = {
{
.start = 0xFFC00400,
.end = 0xFFC004FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX+1,
.flags = IORESOURCE_DMA,
},
};
/* IrDA SIR platform device on UART0. */
static struct platform_device bfin_sir0_device = {
.name = "bfin_sir",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
/* IrDA SIR over UART1: register window, RX IRQ pair, RX DMA channel pair. */
static struct resource bfin_sir1_resources[] = {
{
.start = 0xFFC02000,
.end = 0xFFC020FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX+1,
.flags = IORESOURCE_DMA,
},
};
/* IrDA SIR platform device on UART1. */
static struct platform_device bfin_sir1_device = {
.name = "bfin_sir",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
/* TWI (I2C) controller resources: register base and interrupt. */
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI,
.end = IRQ_TWI,
.flags = IORESOURCE_IRQ,
},
};
/* I2C (TWI) bus 0 platform device. */
static struct platform_device i2c_bfin_twi_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
};
#endif
#if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE)
/* ADP5588 keypad scan-code to Linux keycode map (8 rows x 10 columns).
 * Unlisted indices default to 0 (KEY_RESERVED). */
static const unsigned short adp5588_keymap[ADP5588_KEYMAPSIZE] = {
[0] = KEY_GRAVE,
[1] = KEY_1,
[2] = KEY_2,
[3] = KEY_3,
[4] = KEY_4,
[5] = KEY_5,
[6] = KEY_6,
[7] = KEY_7,
[8] = KEY_8,
[9] = KEY_9,
[10] = KEY_0,
[11] = KEY_MINUS,
[12] = KEY_EQUAL,
[13] = KEY_BACKSLASH,
[15] = KEY_KP0,
[16] = KEY_Q,
[17] = KEY_W,
[18] = KEY_E,
[19] = KEY_R,
[20] = KEY_T,
[21] = KEY_Y,
[22] = KEY_U,
[23] = KEY_I,
[24] = KEY_O,
[25] = KEY_P,
[26] = KEY_LEFTBRACE,
[27] = KEY_RIGHTBRACE,
[29] = KEY_KP1,
[30] = KEY_KP2,
[31] = KEY_KP3,
[32] = KEY_A,
[33] = KEY_S,
[34] = KEY_D,
[35] = KEY_F,
[36] = KEY_G,
[37] = KEY_H,
[38] = KEY_J,
[39] = KEY_K,
[40] = KEY_L,
[41] = KEY_SEMICOLON,
[42] = KEY_APOSTROPHE,
[43] = KEY_BACKSLASH,
[45] = KEY_KP4,
[46] = KEY_KP5,
[47] = KEY_KP6,
[48] = KEY_102ND,
[49] = KEY_Z,
[50] = KEY_X,
[51] = KEY_C,
[52] = KEY_V,
[53] = KEY_B,
[54] = KEY_N,
[55] = KEY_M,
[56] = KEY_COMMA,
[57] = KEY_DOT,
[58] = KEY_SLASH,
[60] = KEY_KPDOT,
[61] = KEY_KP7,
[62] = KEY_KP8,
[63] = KEY_KP9,
[64] = KEY_SPACE,
[65] = KEY_BACKSPACE,
[66] = KEY_TAB,
[67] = KEY_KPENTER,
[68] = KEY_ENTER,
[69] = KEY_ESC,
[70] = KEY_DELETE,
[74] = KEY_KPMINUS,
[76] = KEY_UP,
[77] = KEY_DOWN,
[78] = KEY_RIGHT,
[79] = KEY_LEFT,
};
/* ADP5588 keypad controller platform data (no autorepeat). */
static struct adp5588_kpad_platform_data adp5588_kpad_data = {
.rows = 8,
.cols = 10,
.keymap = adp5588_keymap,
.keymapsize = ARRAY_SIZE(adp5588_keymap),
.repeat = 0,
};
#endif
#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE)
#include <linux/mfd/adp5520.h>
/*
* ADP5520/5501 Backlight Data
*/
/* ADP5520 backlight: fade timing plus ambient-light-sensed current levels
 * (daylight/office/dark) and the comparator trip/hysteresis points. */
static struct adp5520_backlight_platform_data adp5520_backlight_data = {
.fade_in = ADP5520_FADE_T_1200ms,
.fade_out = ADP5520_FADE_T_1200ms,
.fade_led_law = ADP5520_BL_LAW_LINEAR,
.en_ambl_sens = 1,
.abml_filt = ADP5520_BL_AMBL_FILT_640ms,
.l1_daylight_max = ADP5520_BL_CUR_mA(15),
.l1_daylight_dim = ADP5520_BL_CUR_mA(0),
.l2_office_max = ADP5520_BL_CUR_mA(7),
.l2_office_dim = ADP5520_BL_CUR_mA(0),
.l3_dark_max = ADP5520_BL_CUR_mA(3),
.l3_dark_dim = ADP5520_BL_CUR_mA(0),
.l2_trip = ADP5520_L2_COMP_CURR_uA(700),
.l2_hyst = ADP5520_L2_COMP_CURR_uA(50),
.l3_trip = ADP5520_L3_COMP_CURR_uA(80),
.l3_hyst = ADP5520_L3_COMP_CURR_uA(20),
};
/*
* ADP5520/5501 LEDs Data
*/
/* ADP5520/5501 LED descriptors; LED2/LED3 only when ADP5520_EN_ALL_LEDS. */
static struct led_info adp5520_leds[] = {
{
.name = "adp5520-led1",
.default_trigger = "none",
.flags = FLAG_ID_ADP5520_LED1_ADP5501_LED0 | ADP5520_LED_OFFT_600ms,
},
#ifdef ADP5520_EN_ALL_LEDS
{
.name = "adp5520-led2",
.default_trigger = "none",
.flags = FLAG_ID_ADP5520_LED2_ADP5501_LED1,
},
{
.name = "adp5520-led3",
.default_trigger = "none",
.flags = FLAG_ID_ADP5520_LED3_ADP5501_LED2,
},
#endif
};
/* ADP5520 LED subsystem config: fade and on-time settings. */
static struct adp5520_leds_platform_data adp5520_leds_data = {
.num_leds = ARRAY_SIZE(adp5520_leds),
.leds = adp5520_leds,
.fade_in = ADP5520_FADE_T_600ms,
.fade_out = ADP5520_FADE_T_600ms,
.led_on_time = ADP5520_LED_ONT_600ms,
};
/*
* ADP5520 GPIO Data
*/
/* ADP5520 GPIO expander: C1/C2/R2 pins exposed as GPIOs from base 50. */
static struct adp5520_gpio_platform_data adp5520_gpio_data = {
.gpio_start = 50,
.gpio_en_mask = ADP5520_GPIO_C1 | ADP5520_GPIO_C2 | ADP5520_GPIO_R2,
.gpio_pullup_mask = ADP5520_GPIO_C1 | ADP5520_GPIO_C2 | ADP5520_GPIO_R2,
};
/*
* ADP5520 Keypad Data
*/
/* ADP5520 4x4 keypad matrix map: ADP5520_KEY(row, col) -> Linux keycode. */
static const unsigned short adp5520_keymap[ADP5520_KEYMAPSIZE] = {
[ADP5520_KEY(0, 0)] = KEY_GRAVE,
[ADP5520_KEY(0, 1)] = KEY_1,
[ADP5520_KEY(0, 2)] = KEY_2,
[ADP5520_KEY(0, 3)] = KEY_3,
[ADP5520_KEY(1, 0)] = KEY_4,
[ADP5520_KEY(1, 1)] = KEY_5,
[ADP5520_KEY(1, 2)] = KEY_6,
[ADP5520_KEY(1, 3)] = KEY_7,
[ADP5520_KEY(2, 0)] = KEY_8,
[ADP5520_KEY(2, 1)] = KEY_9,
[ADP5520_KEY(2, 2)] = KEY_0,
[ADP5520_KEY(2, 3)] = KEY_MINUS,
[ADP5520_KEY(3, 0)] = KEY_EQUAL,
[ADP5520_KEY(3, 1)] = KEY_BACKSLASH,
[ADP5520_KEY(3, 2)] = KEY_BACKSPACE,
[ADP5520_KEY(3, 3)] = KEY_ENTER,
};
/* ADP5520 keypad config: all four rows and columns enabled, no autorepeat. */
static struct adp5520_keys_platform_data adp5520_keys_data = {
.rows_en_mask = ADP5520_ROW_R3 | ADP5520_ROW_R2 | ADP5520_ROW_R1 | ADP5520_ROW_R0,
.cols_en_mask = ADP5520_COL_C3 | ADP5520_COL_C2 | ADP5520_COL_C1 | ADP5520_COL_C0,
.keymap = adp5520_keymap,
.keymapsize = ARRAY_SIZE(adp5520_keymap),
.repeat = 0,
};
/*
* ADP5520/5501 Multifunction Device Init Data
*/
/* ADP5520 MFD top-level data aggregating the subdevice configs above. */
static struct adp5520_platform_data adp5520_pdev_data = {
.backlight = &adp5520_backlight_data,
.leds = &adp5520_leds_data,
.gpio = &adp5520_gpio_data,
.keys = &adp5520_keys_data,
};
#endif
#if defined(CONFIG_GPIO_ADP5588) || defined(CONFIG_GPIO_ADP5588_MODULE)
/* ADP5588 used as GPIO expander: base 50, pullups left enabled on all pins. */
static struct adp5588_gpio_platform_data adp5588_gpio_data = {
.gpio_start = 50,
.pullup_dis_mask = 0,
};
#endif
#if defined(CONFIG_BACKLIGHT_ADP8870) || defined(CONFIG_BACKLIGHT_ADP8870_MODULE)
#include <linux/i2c/adp8870.h>
/* ADP8870: D7 configured as an individually controlled LED. */
static struct led_info adp8870_leds[] = {
{
.name = "adp8870-led7",
.default_trigger = "none",
.flags = ADP8870_LED_D7 | ADP8870_LED_OFFT_600ms,
},
};
/* ADP8870 backlight: D1-D6 ganged as backlight, five ambient light levels. */
static struct adp8870_backlight_platform_data adp8870_pdata = {
.bl_led_assign = ADP8870_BL_D1 | ADP8870_BL_D2 | ADP8870_BL_D3 |
ADP8870_BL_D4 | ADP8870_BL_D5 | ADP8870_BL_D6, /* 1 = Backlight 0 = Individual LED */
.pwm_assign = 0, /* 1 = Enables PWM mode */
.bl_fade_in = ADP8870_FADE_T_1200ms, /* Backlight Fade-In Timer */
.bl_fade_out = ADP8870_FADE_T_1200ms, /* Backlight Fade-Out Timer */
.bl_fade_law = ADP8870_FADE_LAW_CUBIC1, /* fade-on/fade-off transfer characteristic */
.en_ambl_sens = 1, /* 1 = enable ambient light sensor */
.abml_filt = ADP8870_BL_AMBL_FILT_320ms, /* Light sensor filter time */
.l1_daylight_max = ADP8870_BL_CUR_mA(20), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l1_daylight_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l2_bright_max = ADP8870_BL_CUR_mA(14), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l2_bright_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l3_office_max = ADP8870_BL_CUR_mA(6), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l3_office_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l4_indoor_max = ADP8870_BL_CUR_mA(3), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l4_indor_dim = ADP8870_BL_CUR_mA(0), /* sic: field is spelled "indor" in adp8870.h */
.l5_dark_max = ADP8870_BL_CUR_mA(2), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l5_dark_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l2_trip = ADP8870_L2_COMP_CURR_uA(710), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
.l2_hyst = ADP8870_L2_COMP_CURR_uA(73), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
.l3_trip = ADP8870_L3_COMP_CURR_uA(389), /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
.l3_hyst = ADP8870_L3_COMP_CURR_uA(54), /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
.l4_trip = ADP8870_L4_COMP_CURR_uA(167), /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
.l4_hyst = ADP8870_L4_COMP_CURR_uA(16), /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
.l5_trip = ADP8870_L5_COMP_CURR_uA(43), /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */
.l5_hyst = ADP8870_L5_COMP_CURR_uA(11), /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */
.leds = adp8870_leds,
.num_leds = ARRAY_SIZE(adp8870_leds),
.led_fade_law = ADP8870_FADE_LAW_SQUARE, /* fade-on/fade-off transfer characteristic */
.led_fade_in = ADP8870_FADE_T_600ms,
.led_fade_out = ADP8870_FADE_T_600ms,
.led_on_time = ADP8870_LED_ONT_200ms,
};
#endif
#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE)
#include <linux/i2c/adp8860.h>
/* ADP8860: D7 configured as an individually controlled LED. */
static struct led_info adp8860_leds[] = {
{
.name = "adp8860-led7",
.default_trigger = "none",
.flags = ADP8860_LED_D7 | ADP8860_LED_OFFT_600ms,
},
};
static struct adp8860_backlight_platform_data adp8860_pdata = {
.bl_led_assign = ADP8860_BL_D1 | ADP8860_BL_D2 | ADP8860_BL_D3 |
ADP8860_BL_D4 | ADP8860_BL_D5 | ADP8860_BL_D6, /* 1 = Backlight 0 = Individual LED */
.bl_fade_in = ADP8860_FADE_T_1200ms, /* Backlight Fade-In Timer */
.bl_fade_out = ADP8860_FADE_T_1200ms, /* Backlight Fade-Out Timer */
.bl_fade_law = ADP8860_FADE_LAW_CUBIC1, /* fade-on/fade-off transfer characteristic */
.en_ambl_sens = 1, /* 1 = enable ambient light sensor */
.abml_filt = ADP8860_BL_AMBL_FILT_320ms, /* Light sensor filter time */
.l1_daylight_max = ADP8860_BL_CUR_mA(20), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l1_daylight_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l2_office_max = ADP8860_BL_CUR_mA(6), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l2_office_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l3_dark_max = ADP8860_BL_CUR_mA(2), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l3_dark_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l2_trip = ADP8860_L2_COMP_CURR_uA(710), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
.l2_hyst = ADP8860_L2_COMP_CURR_uA(73), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
.l3_trip = ADP8860_L3_COMP_CURR_uA(43), /* use L3_COMP_CURR_uA(I) 0 <= I <= 138 uA */
.l3_hyst = ADP8860_L3_COMP_CURR_uA(11), /* use L3_COMP_CURR_uA(I) 0 <= I <= 138 uA */
.leds = adp8860_leds,
.num_leds = ARRAY_SIZE(adp8860_leds),
.led_fade_law = ADP8860_FADE_LAW_SQUARE, /* fade-on/fade-off transfer characteristic */
.led_fade_in = ADP8860_FADE_T_600ms,
.led_fade_out = ADP8860_FADE_T_600ms,
.led_on_time = ADP8860_LED_ONT_200ms,
};
#endif
#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE)
static struct regulator_consumer_supply ad5398_consumer = {
.supply = "current",
};
static struct regulator_init_data ad5398_regulator_data = {
.constraints = {
.name = "current range",
.max_uA = 120000,
.valid_ops_mask = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = 1,
.consumer_supplies = &ad5398_consumer,
};
#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \
defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE)
static struct platform_device ad5398_virt_consumer_device = {
.name = "reg-virt-consumer",
.id = 0,
.dev = {
.platform_data = "current", /* Passed to driver */
},
};
#endif
#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \
defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
static struct regulator_bulk_data ad5398_bulk_data = {
.supply = "current",
};
static struct regulator_userspace_consumer_data ad5398_userspace_comsumer_data = {
.name = "ad5398",
.num_supplies = 1,
.supplies = &ad5398_bulk_data,
};
static struct platform_device ad5398_userspace_consumer_device = {
.name = "reg-userspace-consumer",
.id = 0,
.dev = {
.platform_data = &ad5398_userspace_comsumer_data,
},
};
#endif
#endif
#if defined(CONFIG_ADT7410) || defined(CONFIG_ADT7410_MODULE)
/* INT bound temperature alarm event. line 1 */
/* [0] = second interrupt line, [1] = its trigger flags */
static unsigned long adt7410_platform_data[2] = {
	IRQ_PG4, IRQF_TRIGGER_LOW,
};
#endif
#if defined(CONFIG_ADT7316_I2C) || defined(CONFIG_ADT7316_I2C_MODULE)
/* INT bound temperature alarm event. line 1 */
static unsigned long adt7316_i2c_data[2] = {
	IRQF_TRIGGER_LOW, /* interrupt flags */
	GPIO_PF4, /* ldac_pin, 0 means DAC/LDAC registers control DAC update */
};
#endif
/*
 * I2C devices that may be present on this board, registered on bus 0 by
 * stamp_init().  Each entry is compiled in only when its driver is
 * enabled; addresses are 7-bit.  Note that several peripherals share
 * interrupt lines (IRQ_PG5/IRQ_PG6) and a few share I2C addresses
 * (e.g. 0x48, 0x38), so only compatible subsets can be enabled at once.
 */
static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
#ifdef CONFIG_SND_SOC_AD193X_I2C
	{
		I2C_BOARD_INFO("ad1937", 0x04),
	},
#endif
#if defined(CONFIG_SND_SOC_ADAV80X) || defined(CONFIG_SND_SOC_ADAV80X_MODULE)
	{
		I2C_BOARD_INFO("adav803", 0x10),
	},
#endif
#if defined(CONFIG_INPUT_AD714X_I2C) || defined(CONFIG_INPUT_AD714X_I2C_MODULE)
	{
		I2C_BOARD_INFO("ad7142_captouch", 0x2C),
		.irq = IRQ_PG5,
		.platform_data = (void *)&ad7142_i2c_platform_data,
	},
#endif
#if defined(CONFIG_AD7150) || defined(CONFIG_AD7150_MODULE)
	{
		I2C_BOARD_INFO("ad7150", 0x48),
		.irq = IRQ_PG5, /* fixme: use real interrupt number */
	},
#endif
#if defined(CONFIG_AD7152) || defined(CONFIG_AD7152_MODULE)
	{
		I2C_BOARD_INFO("ad7152", 0x48),
	},
#endif
#if defined(CONFIG_AD774X) || defined(CONFIG_AD774X_MODULE)
	{
		I2C_BOARD_INFO("ad774x", 0x48),
	},
#endif
#if defined(CONFIG_ADE7854_I2C) || defined(CONFIG_ADE7854_I2C_MODULE)
	{
		I2C_BOARD_INFO("ade7854", 0x38),
	},
#endif
#if defined(CONFIG_ADT75) || defined(CONFIG_ADT75_MODULE)
	{
		I2C_BOARD_INFO("adt75", 0x9),
		.irq = IRQ_PG5,
	},
#endif
#if defined(CONFIG_ADT7410) || defined(CONFIG_ADT7410_MODULE)
	{
		I2C_BOARD_INFO("adt7410", 0x48),
		/* CT critical temperature event. line 0 */
		.irq = IRQ_PG5,
		.platform_data = (void *)&adt7410_platform_data,
	},
#endif
#if defined(CONFIG_AD7291) || defined(CONFIG_AD7291_MODULE)
	{
		I2C_BOARD_INFO("ad7291", 0x20),
		.irq = IRQ_PG5,
	},
#endif
#if defined(CONFIG_ADT7316_I2C) || defined(CONFIG_ADT7316_I2C_MODULE)
	{
		I2C_BOARD_INFO("adt7316", 0x48),
		.irq = IRQ_PG6,
		.platform_data = (void *)&adt7316_i2c_data,
	},
#endif
#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
	{
		I2C_BOARD_INFO("pcf8574_lcd", 0x22),
	},
#endif
#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE)
	{
		I2C_BOARD_INFO("pcf8574_keypad", 0x27),
		.irq = IRQ_PG6,
	},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE)
	{
		I2C_BOARD_INFO("ad7879", 0x2F),
		.irq = IRQ_PG5,
		.platform_data = (void *)&bfin_ad7879_ts_info,
	},
#endif
#if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE)
	{
		I2C_BOARD_INFO("adp5588-keys", 0x34),
		.irq = IRQ_PG0,
		.platform_data = (void *)&adp5588_kpad_data,
	},
#endif
#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE)
	{
		I2C_BOARD_INFO("pmic-adp5520", 0x32),
		.irq = IRQ_PG0,
		.platform_data = (void *)&adp5520_pdev_data,
	},
#endif
#if defined(CONFIG_INPUT_ADXL34X_I2C) || defined(CONFIG_INPUT_ADXL34X_I2C_MODULE)
	{
		I2C_BOARD_INFO("adxl34x", 0x53),
		.irq = IRQ_PG3,
		.platform_data = (void *)&adxl34x_info,
	},
#endif
#if defined(CONFIG_GPIO_ADP5588) || defined(CONFIG_GPIO_ADP5588_MODULE)
	{
		I2C_BOARD_INFO("adp5588-gpio", 0x34),
		.platform_data = (void *)&adp5588_gpio_data,
	},
#endif
#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE)
	{
		I2C_BOARD_INFO("bfin-adv7393", 0x2B),
	},
#endif
#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
	{
		I2C_BOARD_INFO("bf537-lq035-ad5280", 0x2F),
	},
#endif
#if defined(CONFIG_BACKLIGHT_ADP8870) || defined(CONFIG_BACKLIGHT_ADP8870_MODULE)
	{
		I2C_BOARD_INFO("adp8870", 0x2B),
		.platform_data = (void *)&adp8870_pdata,
	},
#endif
#if defined(CONFIG_SND_SOC_ADAU1371) || defined(CONFIG_SND_SOC_ADAU1371_MODULE)
	{
		I2C_BOARD_INFO("adau1371", 0x1A),
	},
#endif
#if defined(CONFIG_SND_SOC_ADAU1761) || defined(CONFIG_SND_SOC_ADAU1761_MODULE)
	{
		I2C_BOARD_INFO("adau1761", 0x38),
	},
#endif
#if defined(CONFIG_SND_SOC_ADAU1361) || defined(CONFIG_SND_SOC_ADAU1361_MODULE)
	{
		I2C_BOARD_INFO("adau1361", 0x38),
	},
#endif
#if defined(CONFIG_SND_SOC_ADAU1701) || defined(CONFIG_SND_SOC_ADAU1701_MODULE)
	{
		I2C_BOARD_INFO("adau1701", 0x34),
	},
#endif
#if defined(CONFIG_AD525X_DPOT) || defined(CONFIG_AD525X_DPOT_MODULE)
	{
		I2C_BOARD_INFO("ad5258", 0x18),
	},
#endif
#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE)
	{
		I2C_BOARD_INFO("ssm2602", 0x1b),
	},
#endif
#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE)
	{
		I2C_BOARD_INFO("ad5398", 0xC),
		.platform_data = (void *)&ad5398_regulator_data,
	},
#endif
#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE)
	{
		I2C_BOARD_INFO("adp8860", 0x2A),
		.platform_data = (void *)&adp8860_pdata,
	},
#endif
#if defined(CONFIG_SND_SOC_ADAU1373) || defined(CONFIG_SND_SOC_ADAU1373_MODULE)
	{
		I2C_BOARD_INFO("adau1373", 0x1A),
	},
#endif
#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
	{
		I2C_BOARD_INFO("ad5252", 0x2e),
	},
#endif
};
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
/* SPORT0 in UART emulation mode: MMIO register window + RX/error IRQs */
static struct resource bfin_sport0_uart_resources[] = {
	{
		.start = SPORT0_TCR1,
		.end = SPORT0_MRCS3+4,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_SPORT0_RX,
		.end = IRQ_SPORT0_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_SPORT0_ERROR,
		.end = IRQ_SPORT0_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

/* Pin-mux request list for the driver; terminated by a 0 sentinel */
static unsigned short bfin_sport0_peripherals[] = {
	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};

static struct platform_device bfin_sport0_uart_device = {
	.name = "bfin-sport-uart",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
	.resource = bfin_sport0_uart_resources,
	.dev = {
		.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
	},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
/* SPORT1 in UART emulation mode: same layout as the SPORT0 instance */
static struct resource bfin_sport1_uart_resources[] = {
	{
		.start = SPORT1_TCR1,
		.end = SPORT1_MRCS3+4,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_SPORT1_RX,
		.end = IRQ_SPORT1_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_SPORT1_ERROR,
		.end = IRQ_SPORT1_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

/* Pin-mux request list for the driver; terminated by a 0 sentinel */
static unsigned short bfin_sport1_peripherals[] = {
	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};

static struct platform_device bfin_sport1_uart_device = {
	.name = "bfin-sport-uart",
	.id = 1,
	.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
	.resource = bfin_sport1_uart_resources,
	.dev = {
		.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
	},
};
#endif
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
/*
 * CF/IDE/NAND card wiring: choose exactly one of the two interface
 * modes below; the resource tables differ accordingly.
 */
#define CF_IDE_NAND_CARD_USE_HDD_INTERFACE
/* #define CF_IDE_NAND_CARD_USE_CF_IN_COMMON_MEMORY_MODE */
#ifdef CF_IDE_NAND_CARD_USE_HDD_INTERFACE
#define PATA_INT	IRQ_PF5
static struct pata_platform_info bfin_pata_platform_data = {
	.ioport_shift = 1, /* registers are 16-bit spaced on the HDD interface */
	.irq_flags = IRQF_TRIGGER_HIGH,
};

/* Command block, control block, then the interrupt line */
static struct resource bfin_pata_resources[] = {
	{
		.start = 0x20314020,
		.end = 0x2031403F,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = 0x2031401C,
		.end = 0x2031401F,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = PATA_INT,
		.end = PATA_INT,
		.flags = IORESOURCE_IRQ,
	},
};
#elif defined(CF_IDE_NAND_CARD_USE_CF_IN_COMMON_MEMORY_MODE)
/* Polled (no IRQ) CompactFlash common-memory mode */
static struct pata_platform_info bfin_pata_platform_data = {
	.ioport_shift = 0,
};
/* CompactFlash Storage Card Memory Mapped Addressing
 * /REG = A11 = 1
 */
static struct resource bfin_pata_resources[] = {
	{
		.start = 0x20211800,
		.end = 0x20211807,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = 0x2021180E, /* Device Ctl */
		.end = 0x2021180E,
		.flags = IORESOURCE_MEM,
	},
};
#endif
static struct platform_device bfin_pata_device = {
	.name = "pata_platform",
	.id = -1,
	.num_resources = ARRAY_SIZE(bfin_pata_resources),
	.resource = bfin_pata_resources,
	.dev = {
		.platform_data = &bfin_pata_platform_data,
	}
};
#endif
/*
 * Core-clock frequency -> minimum core voltage pairs (datasheet values)
 * used by the dynamic power management controller to scale VDDINT.
 */
static const unsigned int cclk_vlev_datasheet[] =
{
	VRPAIR(VLEV_085, 250000000),
	VRPAIR(VLEV_090, 376000000),
	VRPAIR(VLEV_095, 426000000),
	VRPAIR(VLEV_100, 426000000),
	VRPAIR(VLEV_105, 476000000),
	VRPAIR(VLEV_110, 476000000),
	VRPAIR(VLEV_115, 476000000),
	VRPAIR(VLEV_120, 500000000),
	VRPAIR(VLEV_125, 533000000),
	VRPAIR(VLEV_130, 600000000),
};

static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
	.tuple_tab = cclk_vlev_datasheet,
	.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
	.vr_settling_time = 25 /* us */,
};

/* Always registered first in stamp_devices[] */
static struct platform_device bfin_dpmc = {
	.name = "bfin dpmc",
	.dev = {
		.platform_data = &bfin_dmpc_vreg_data,
	},
};
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
	defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \
	defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
/* Pin-mux request list for SPORT x (0-terminated, consumed by the driver) */
#define SPORT_REQ(x) \
	[x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \
		P_SPORT##x##_RFS, P_SPORT##x##_DRPRI, P_SPORT##x##_RSCLK, 0}
static const u16 bfin_snd_pin[][7] = {
	SPORT_REQ(0),
	SPORT_REQ(1),
};

/* Per-SPORT platform data; indexed by CONFIG_SND_BF5XX_SPORT_NUM below */
static struct bfin_snd_platform_data bfin_snd_data[] = {
	{
		.pin_req = &bfin_snd_pin[0][0],
	},
	{
		.pin_req = &bfin_snd_pin[1][0],
	},
};

/* Register window, RX/TX DMA channels and error IRQ for SPORT x */
#define BFIN_SND_RES(x) \
	[x] = { \
		{ \
			.start = SPORT##x##_TCR1, \
			.end = SPORT##x##_TCR1, \
			.flags = IORESOURCE_MEM \
		}, \
		{ \
			.start = CH_SPORT##x##_RX, \
			.end = CH_SPORT##x##_RX, \
			.flags = IORESOURCE_DMA, \
		}, \
		{ \
			.start = CH_SPORT##x##_TX, \
			.end = CH_SPORT##x##_TX, \
			.flags = IORESOURCE_DMA, \
		}, \
		{ \
			.start = IRQ_SPORT##x##_ERROR, \
			.end = IRQ_SPORT##x##_ERROR, \
			.flags = IORESOURCE_IRQ, \
		} \
	}
static struct resource bfin_snd_resources[][4] = {
	BFIN_SND_RES(0),
	BFIN_SND_RES(1),
};
#endif
/* ASoC PCM (DMA) platform devices; one per audio interface type */
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
static struct platform_device bfin_i2s_pcm = {
	.name = "bfin-i2s-pcm-audio",
	.id = -1,
};
#endif
#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
static struct platform_device bfin_tdm_pcm = {
	.name = "bfin-tdm-pcm-audio",
	.id = -1,
};
#endif
#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
static struct platform_device bfin_ac97_pcm = {
	.name = "bfin-ac97-pcm-audio",
	.id = -1,
};
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || \
	defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
/* GPIO used by the machine driver to enable the AD73311 codec */
static const unsigned ad73311_gpio[] = {
	GPIO_PF4,
};

static struct platform_device bfin_ad73311_machine = {
	.name = "bfin-snd-ad73311",
	.id = 1,
	.dev = {
		.platform_data = (void *)ad73311_gpio,
	},
};
#endif
#if defined(CONFIG_SND_SOC_AD73311) || defined(CONFIG_SND_SOC_AD73311_MODULE)
/* AD73311 codec device (no resources; configured over SPORT) */
static struct platform_device bfin_ad73311_codec_device = {
	.name = "ad73311",
	.id = -1,
};
#endif
#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X) || \
	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X_MODULE)
/* Machine driver for the ADAV801/803 evaluation board */
static struct platform_device bfin_eval_adav801_device = {
	.name = "bfin-eval-adav801",
	.id = -1,
};
#endif
/*
 * ASoC CPU-DAI devices.  All three bind to the SPORT selected at build
 * time by CONFIG_SND_BF5XX_SPORT_NUM and share the resource/platform
 * data tables defined above.
 */
#if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
static struct platform_device bfin_i2s = {
	.name = "bfin-i2s",
	.id = CONFIG_SND_BF5XX_SPORT_NUM,
	.num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
	.resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
	.dev = {
		.platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
	},
};
#endif
#if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
static struct platform_device bfin_tdm = {
	.name = "bfin-tdm",
	.id = CONFIG_SND_BF5XX_SPORT_NUM,
	.num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
	.resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
	.dev = {
		.platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
	},
};
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
static struct platform_device bfin_ac97 = {
	.name = "bfin-ac97",
	.id = CONFIG_SND_BF5XX_SPORT_NUM,
	.num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
	.resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
	.dev = {
		.platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
	},
};
#endif
#if defined(CONFIG_REGULATOR_FIXED_VOLTAGE) || defined(CONFIG_REGULATOR_FIXED_VOLTAGE_MODULE)
#define REGULATOR_ADP122 "adp122"
#define REGULATOR_ADP122_UV 2500000
static struct regulator_consumer_supply adp122_consumers = {
.supply = REGULATOR_ADP122,
};
static struct regulator_init_data adp_switch_regulator_data = {
.constraints = {
.name = REGULATOR_ADP122,
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
.min_uV = REGULATOR_ADP122_UV,
.max_uV = REGULATOR_ADP122_UV,
.min_uA = 0,
.max_uA = 300000,
},
.num_consumer_supplies = 1, /* only 1 */
.consumer_supplies = &adp122_consumers,
};
static struct fixed_voltage_config adp_switch_pdata = {
.supply_name = REGULATOR_ADP122,
.microvolts = REGULATOR_ADP122_UV,
.gpio = GPIO_PF2,
.enable_high = 1,
.enabled_at_boot = 0,
.init_data = &adp_switch_regulator_data,
};
static struct platform_device adp_switch_device = {
.name = "reg-fixed-voltage",
.id = 0,
.dev = {
.platform_data = &adp_switch_pdata,
},
};
#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \
defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
static struct regulator_bulk_data adp122_bulk_data = {
.supply = REGULATOR_ADP122,
};
static struct regulator_userspace_consumer_data adp122_userspace_comsumer_data = {
.name = REGULATOR_ADP122,
.num_supplies = 1,
.supplies = &adp122_bulk_data,
};
static struct platform_device adp122_userspace_consumer_device = {
.name = "reg-userspace-consumer",
.id = 0,
.dev = {
.platform_data = &adp122_userspace_comsumer_data,
},
};
#endif
#endif
#if defined(CONFIG_IIO_GPIO_TRIGGER) || \
	defined(CONFIG_IIO_GPIO_TRIGGER_MODULE)
/* Falling-edge GPIO interrupt used as an IIO sampling trigger */
static struct resource iio_gpio_trigger_resources[] = {
	[0] = {
		.start = IRQ_PF5,
		.end = IRQ_PF5,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
	},
};

static struct platform_device iio_gpio_trigger = {
	.name = "iio_gpio_trigger",
	.num_resources = ARRAY_SIZE(iio_gpio_trigger_resources),
	.resource = iio_gpio_trigger_resources,
};
#endif
#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373) || \
	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373_MODULE)
/* Machine driver for the ADAU1373 evaluation board */
static struct platform_device bf5xx_adau1373_device = {
	.name = "bfin-eval-adau1373",
};
#endif
#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701) || \
	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701_MODULE)
/* Machine driver for the ADAU1701 evaluation board */
static struct platform_device bf5xx_adau1701_device = {
	.name = "bfin-eval-adau1701",
};
#endif
/*
 * All platform devices for this board, registered in one go by
 * stamp_init().  Order matters for a few entries (e.g. the dpmc device
 * first, and bfin_mii_bus before bfin_mac_device).
 */
static struct platform_device *stamp_devices[] __initdata = {
	&bfin_dpmc,
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
	&bfin_pcmcia_cf_device,
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
	&rtc_device,
#endif
#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
	&sl811_hcd_device,
#endif
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
	&isp1362_hcd_device,
#endif
#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
	&bfin_isp1760_device,
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
	&smc91x_device,
#endif
#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
	&dm9000_device,
#endif
#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
	&bfin_can_device,
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
	&bfin_mii_bus,
	&bfin_mac_device,
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
	&net2272_bfin_device,
#endif
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
	&bfin_spi0_device,
#endif
#if defined(CONFIG_SPI_BFIN_SPORT) || defined(CONFIG_SPI_BFIN_SPORT_MODULE)
	&bfin_sport_spi0_device,
	&bfin_sport_spi1_device,
#endif
#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
	&bfin_fb_device,
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
	&bfin_lq035q1_device,
#endif
#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
	|| defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
	&bfin_capture_device,
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
	&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
	&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
	&bfin_sir1_device,
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
	&i2c_bfin_twi_device,
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
	&bfin_pata_device,
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
	&bfin_device_gpiokeys,
#endif
#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
	&bfin_async_nand_device,
#endif
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
	&stamp_flash_device,
#endif
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
	&bfin_i2s_pcm,
#endif
#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
	&bfin_tdm_pcm,
#endif
#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
	&bfin_ac97_pcm,
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || \
	defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
	&bfin_ad73311_machine,
#endif
#if defined(CONFIG_SND_SOC_AD73311) || defined(CONFIG_SND_SOC_AD73311_MODULE)
	&bfin_ad73311_codec_device,
#endif
#if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
	&bfin_i2s,
#endif
#if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
	&bfin_tdm,
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
	&bfin_ac97,
#endif
#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE)
#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \
	defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE)
	&ad5398_virt_consumer_device,
#endif
#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \
	defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
	&ad5398_userspace_consumer_device,
#endif
#endif
#if defined(CONFIG_REGULATOR_FIXED_VOLTAGE) || defined(CONFIG_REGULATOR_FIXED_VOLTAGE_MODULE)
	&adp_switch_device,
#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \
	defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
	&adp122_userspace_consumer_device,
#endif
#endif
#if defined(CONFIG_IIO_GPIO_TRIGGER) || \
	defined(CONFIG_IIO_GPIO_TRIGGER_MODULE)
	&iio_gpio_trigger,
#endif
#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373) || \
	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1373_MODULE)
	&bf5xx_adau1373_device,
#endif
#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701) || \
	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1701_MODULE)
	&bf5xx_adau1701_device,
#endif
#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X) || \
	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAV80X_MODULE)
	&bfin_eval_adav801_device,
#endif
};
/*
 * net2272_init - claim the NET2272 reset GPIO and pulse the chip reset
 *
 * Returns 0 on success (or when the NET2272 driver is not configured),
 * otherwise the gpio_request() error.  The GPIO is intentionally kept
 * held so the chip stays out of reset.
 */
static int __init net2272_init(void)
{
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
	int err = gpio_request(GPIO_PF6, "net2272");

	if (err)
		return err;

	/* Reset the USB chip: drive the line low, wait, then release */
	gpio_direction_output(GPIO_PF6, 0);
	mdelay(2);
	gpio_set_value(GPIO_PF6, 1);
#endif
	return 0;
}
/*
 * stamp_init - board init: register all platform devices and the
 * I2C/SPI board-info tables.  Runs at arch_initcall time so the
 * devices exist before ordinary driver initcalls probe them.
 */
static int __init stamp_init(void)
{
	printk(KERN_INFO "%s(): registering device resources\n", __func__);
	bfin_plat_nand_init();
	adf702x_mac_init();
	platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
	i2c_register_board_info(0, bfin_i2c_board_info,
				ARRAY_SIZE(bfin_i2c_board_info));
	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
	/* best effort: a failure here only disables the USB gadget chip */
	if (net2272_init())
		pr_warning("unable to configure net2272; it probably won't work\n");
	return 0;
}
arch_initcall(stamp_init);
/*
 * Console-capable devices that must be available before the normal
 * initcall sequence (for earlyprintk / early serial console).
 */
static struct platform_device *stamp_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
	&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#endif
};
/*
 * Register the early console devices listed above; called by the
 * Blackfin arch setup code before regular platform device init.
 */
void __init native_machine_early_platform_add_devices(void)
{
	printk(KERN_INFO "register early platform devices\n");
	early_platform_add_devices(stamp_early_devices,
		ARRAY_SIZE(stamp_early_devices));
}
/*
 * native_machine_restart - board hook run just before system reset
 * @cmd: restart command string (unused here)
 */
void native_machine_restart(char *cmd)
{
	/* workaround reboot hang when booting from SPI */
	if ((bfin_read_SYSCR() & 0x7) == 0x3)
		bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS);
}
/*
* Currently the MAC address is saved in Flash by U-Boot
*/
#define FLASH_MAC 0x203f0000
int bfin_get_ether_addr(char *addr)
{
*(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC);
*(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4);
return 0;
}
EXPORT_SYMBOL(bfin_get_ether_addr);
| gpl-2.0 |
kkfong/android_kernel_oneplus_msm8974 | fs/hfs/dir.c | 4930 | 7943 | /*
* linux/fs/hfs/dir.c
*
* Copyright (C) 1995-1997 Paul H. Hargrove
* (C) 2003 Ardis Technologies <roman@ardistech.com>
* This file may be distributed under the terms of the GNU General Public License.
*
* This file contains directory-related functions independent of which
* scheme is being used to represent forks.
*
* Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
*/
#include "hfs_fs.h"
#include "btree.h"
/*
 * hfs_lookup()
 *
 * Look up @dentry in directory @dir via the catalog B-tree and attach
 * the matching inode (or hash a negative dentry if the name is absent).
 */
static struct dentry *hfs_lookup(struct inode *dir, struct dentry *dentry,
				 struct nameidata *nd)
{
	struct hfs_find_data fd;
	hfs_cat_rec rec;
	struct inode *inode;
	int err;

	hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
	hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name);
	err = hfs_brec_read(&fd, &rec, sizeof(rec));
	if (err) {
		hfs_find_exit(&fd);
		if (err != -ENOENT)
			return ERR_PTR(err);
		/* name not in the catalog: add a negative dentry */
		d_add(dentry, NULL);
		return NULL;
	}

	inode = hfs_iget(dir->i_sb, &fd.search_key->cat, &rec);
	hfs_find_exit(&fd);
	if (!inode)
		return ERR_PTR(-EACCES);

	d_add(dentry, inode);
	return NULL;
}
/*
 * hfs_readdir
 *
 * Emit directory entries for one readdir(2) pass.  Positions 0 and 1 are
 * synthesized "." and ".." (the latter from the folder's catalog thread
 * record); real catalog records are walked from f_pos 2 onward.  On exit
 * the last key is cached in filp->private_data so catalog changes can
 * fix up open directory streams.
 */
static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int len, err;
	char strbuf[HFS_MAX_NAMELEN];
	union hfs_cat_rec entry;
	struct hfs_find_data fd;
	struct hfs_readdir_data *rd;
	u16 type;

	if (filp->f_pos >= inode->i_size)
		return 0;

	hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
	hfs_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
	err = hfs_brec_find(&fd);
	if (err)
		goto out;

	switch ((u32)filp->f_pos) {
	case 0:
		/* This is completely artificial... */
		if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR))
			goto out;
		filp->f_pos++;
		/* fall through */
	case 1:
		if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
			err = -EIO;
			goto out;
		}
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
		if (entry.type != HFS_CDR_THD) {
			printk(KERN_ERR "hfs: bad catalog folder thread\n");
			err = -EIO;
			goto out;
		}
		//if (fd.entrylength < HFS_MIN_THREAD_SZ) {
		//	printk(KERN_ERR "hfs: truncated catalog thread\n");
		//	err = -EIO;
		//	goto out;
		//}
		/* ".." comes from the thread record's parent ID */
		if (filldir(dirent, "..", 2, 1,
			    be32_to_cpu(entry.thread.ParID), DT_DIR))
			goto out;
		filp->f_pos++;
		/* fall through */
	default:
		if (filp->f_pos >= inode->i_size)
			goto out;
		/* skip the records already emitted in earlier passes */
		err = hfs_brec_goto(&fd, filp->f_pos - 1);
		if (err)
			goto out;
	}

	for (;;) {
		/* all entries of this dir share inode->i_ino as parent ID */
		if (be32_to_cpu(fd.key->cat.ParID) != inode->i_ino) {
			printk(KERN_ERR "hfs: walked past end of dir\n");
			err = -EIO;
			goto out;
		}
		if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
			err = -EIO;
			goto out;
		}
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
		type = entry.type;
		len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName);
		if (type == HFS_CDR_DIR) {
			if (fd.entrylength < sizeof(struct hfs_cat_dir)) {
				printk(KERN_ERR "hfs: small dir entry\n");
				err = -EIO;
				goto out;
			}
			if (filldir(dirent, strbuf, len, filp->f_pos,
				    be32_to_cpu(entry.dir.DirID), DT_DIR))
				break;
		} else if (type == HFS_CDR_FIL) {
			if (fd.entrylength < sizeof(struct hfs_cat_file)) {
				printk(KERN_ERR "hfs: small file entry\n");
				err = -EIO;
				goto out;
			}
			if (filldir(dirent, strbuf, len, filp->f_pos,
				    be32_to_cpu(entry.file.FlNum), DT_REG))
				break;
		} else {
			printk(KERN_ERR "hfs: bad catalog entry type %d\n", type);
			err = -EIO;
			goto out;
		}
		filp->f_pos++;
		if (filp->f_pos >= inode->i_size)
			goto out;
		err = hfs_brec_goto(&fd, 1);
		if (err)
			goto out;
	}

	/* remember where we stopped so the next call can resume here */
	rd = filp->private_data;
	if (!rd) {
		rd = kmalloc(sizeof(struct hfs_readdir_data), GFP_KERNEL);
		if (!rd) {
			err = -ENOMEM;
			goto out;
		}
		filp->private_data = rd;
		rd->file = filp;
		list_add(&rd->list, &HFS_I(inode)->open_dir_list);
	}
	/*
	 * NOTE(review): hfsplus copies from fd.key (the pointed-to key);
	 * "&fd.key" here copies starting at the pointer member itself,
	 * which looks like a copy source bug -- verify against mainline.
	 */
	memcpy(&rd->key, &fd.key, sizeof(struct hfs_cat_key));
out:
	hfs_find_exit(&fd);
	return err;
}
/*
 * Release an open directory: drop its cached readdir position from the
 * inode's open_dir_list and free the bookkeeping record.
 * NOTE(review): the list manipulation is not visibly locked here --
 * verify whether a per-sb/inode lock is expected by the callers.
 */
static int hfs_dir_release(struct inode *inode, struct file *file)
{
	struct hfs_readdir_data *rd = file->private_data;

	if (!rd)
		return 0;

	list_del(&rd->list);
	kfree(rd);
	return 0;
}
/*
 * hfs_create()
 *
 * The create() entry in the inode_operations structure for regular HFS
 * directories: allocate a new inode for @dentry in @dir, insert the
 * matching catalog record, and instantiate the dentry.  On catalog
 * failure the half-created inode is torn down again.
 */
static int hfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		      struct nameidata *nd)
{
	struct inode *inode = hfs_new_inode(dir, &dentry->d_name, mode);
	int err;

	if (!inode)
		return -ENOSPC;

	err = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode);
	if (!err) {
		d_instantiate(dentry, inode);
		mark_inode_dirty(inode);
		return 0;
	}

	/* back out the inode we just allocated */
	clear_nlink(inode);
	hfs_delete_inode(inode);
	iput(inode);
	return err;
}
/*
 * hfs_mkdir()
 *
 * The mkdir() entry in the inode_operations structure for regular HFS
 * directories: identical to hfs_create() except that the new inode is
 * forced to directory mode.
 */
static int hfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode = hfs_new_inode(dir, &dentry->d_name, S_IFDIR | mode);
	int err;

	if (!inode)
		return -ENOSPC;

	err = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode);
	if (!err) {
		d_instantiate(dentry, inode);
		mark_inode_dirty(inode);
		return 0;
	}

	/* back out the inode we just allocated */
	clear_nlink(inode);
	hfs_delete_inode(inode);
	iput(inode);
	return err;
}
/*
 * hfs_remove()
 *
 * Serves as both unlink() and rmdir() for regular HFS directories.
 * HFS has no hardlinks, so both paths simply drop the link count to
 * zero; the only extra rule is that a directory must be empty (i_size
 * of 2 accounts for "." and "..").
 */
static int hfs_remove(struct inode *dir, struct dentry *dentry)
{
	struct inode *victim = dentry->d_inode;
	int err;

	if (S_ISDIR(victim->i_mode) && victim->i_size != 2)
		return -ENOTEMPTY;

	err = hfs_cat_delete(victim->i_ino, dir, &dentry->d_name);
	if (err)
		return err;

	clear_nlink(victim);
	victim->i_ctime = CURRENT_TIME_SEC;
	hfs_delete_inode(victim);
	mark_inode_dirty(victim);
	return 0;
}
/*
 * hfs_rename()
 *
 * The rename() entry in the inode_operations structure for regular HFS
 * directories.  HFS cannot overwrite in place, so an existing target is
 * unlinked first; the catalog record is then moved and, on success, the
 * cached catalog key of the moved inode is rebuilt for its new home.
 * XXX: how do you handle must_be dir?
 */
static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
		      struct inode *new_dir, struct dentry *new_dentry)
{
	int err;

	/* Unlink destination if it already exists */
	if (new_dentry->d_inode) {
		err = hfs_remove(new_dir, new_dentry);
		if (err)
			return err;
	}

	err = hfs_cat_move(old_dentry->d_inode->i_ino,
			   old_dir, &old_dentry->d_name,
			   new_dir, &new_dentry->d_name);
	if (err)
		return err;

	hfs_cat_build_key(old_dir->i_sb,
			  (btree_key *)&HFS_I(old_dentry->d_inode)->cat_key,
			  new_dir->i_ino, &new_dentry->d_name);
	return 0;
}
/* Directory file operations: readdir plus open-stream bookkeeping */
const struct file_operations hfs_dir_operations = {
	.read = generic_read_dir,
	.readdir = hfs_readdir,
	.llseek = generic_file_llseek,
	.release = hfs_dir_release,
};

/* Directory inode operations; unlink and rmdir both map to hfs_remove() */
const struct inode_operations hfs_dir_inode_operations = {
	.create = hfs_create,
	.lookup = hfs_lookup,
	.unlink = hfs_remove,
	.mkdir = hfs_mkdir,
	.rmdir = hfs_remove,
	.rename = hfs_rename,
	.setattr = hfs_inode_setattr,
};
| gpl-2.0 |
thornbirdblue/8974_kernel | drivers/net/wireless/p54/eeprom.c | 5186 | 23930 | /*
* EEPROM parser code for mac80211 Prism54 drivers
*
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
*
* Based on:
* - the islsm (softmac prism54) driver, which is:
* Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
* - stlc45xx driver
* Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include <linux/crc-ccitt.h>
#include <linux/export.h>
#include "p54.h"
#include "eeprom.h"
#include "lmac.h"
/*
 * 2.4 GHz rate table: CCK rates (1/2/5.5/11 Mbit/s, hw values 0-3)
 * followed by the OFDM rates (6..54 Mbit/s, hw values 4-11).
 * .bitrate is in units of 100 kbit/s per mac80211 convention.
 */
static struct ieee80211_rate p54_bgrates[] = {
	{ .bitrate = 10, .hw_value = 0, },
	{ .bitrate = 20, .hw_value = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55, .hw_value = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110, .hw_value = 3, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60, .hw_value = 4, },
	{ .bitrate = 90, .hw_value = 5, },
	{ .bitrate = 120, .hw_value = 6, },
	{ .bitrate = 180, .hw_value = 7, },
	{ .bitrate = 240, .hw_value = 8, },
	{ .bitrate = 360, .hw_value = 9, },
	{ .bitrate = 480, .hw_value = 10, },
	{ .bitrate = 540, .hw_value = 11, },
};
/* 5 GHz rate table: the OFDM subset of p54_bgrates (same hw values 4-11) */
static struct ieee80211_rate p54_arates[] = {
	{ .bitrate = 60, .hw_value = 4, },
	{ .bitrate = 90, .hw_value = 5, },
	{ .bitrate = 120, .hw_value = 6, },
	{ .bitrate = 180, .hw_value = 7, },
	{ .bitrate = 240, .hw_value = 8, },
	{ .bitrate = 360, .hw_value = 9, },
	{ .bitrate = 480, .hw_value = 10, },
	{ .bitrate = 540, .hw_value = 11, },
};
/* Fallback rssi -> dBm conversion coefficients (see p54_rssi_find). */
static struct p54_rssi_db_entry p54_rssi_default = {
	/*
	 * The defaults are taken from usb-logs of the
	 * vendor driver. So, they should be safe to
	 * use in case we can't get a match from the
	 * rssi <-> dBm conversion database.
	 */
	.mul = 130,
	.add = -398,
};
/*
 * Per-channel EEPROM data-set flags: a channel is only usable once all
 * three data sets have been seen (CHAN_HAS_ALL, checked in
 * p54_generate_band).
 */
#define CHAN_HAS_CAL		BIT(0)
#define CHAN_HAS_LIMIT		BIT(1)
#define CHAN_HAS_CURVE		BIT(2)
#define CHAN_HAS_ALL		(CHAN_HAS_CAL | CHAN_HAS_LIMIT | CHAN_HAS_CURVE)

/* one channel collected from the EEPROM databases */
struct p54_channel_entry {
	u16 freq;	/* center frequency in MHz */
	u16 data;	/* CHAN_HAS_* flags accumulated so far */
	int index;	/* IEEE channel number derived from freq */
	enum ieee80211_band band;
};

/* scratch list used while merging the EEPROM channel databases */
struct p54_channel_list {
	struct p54_channel_entry *channels;
	size_t entries;		/* populated slots in channels[] */
	size_t max_entries;	/* allocated capacity of channels[] */
	size_t band_channel_num[IEEE80211_NUM_BANDS];
};
/*
 * Map a center frequency (MHz) onto a mac80211 band index,
 * or -1 when the frequency lies outside both supported bands.
 */
static int p54_get_band_from_freq(u16 freq)
{
	int band = -1;

	/* FIXME: sync these values with the 802.11 spec */
	if (freq >= 2412 && freq <= 2484)
		band = IEEE80211_BAND_2GHZ;
	else if (freq >= 4920 && freq <= 5825)
		band = IEEE80211_BAND_5GHZ;

	return band;
}
/* true (non-zero) when both frequencies fall into the same band */
static int same_band(u16 freq, u16 freq2)
{
	return p54_get_band_from_freq(freq) == p54_get_band_from_freq(freq2);
}
/* sort() comparator: order channel entries by ascending frequency */
static int p54_compare_channels(const void *_a,
				const void *_b)
{
	const struct p54_channel_entry *chan_a = _a;
	const struct p54_channel_entry *chan_b = _b;

	return chan_a->freq - chan_b->freq;
}
/* sort() comparator: order rssi calibration entries by ascending frequency */
static int p54_compare_rssichan(const void *_a,
				const void *_b)
{
	const struct p54_rssi_db_entry *entry_a = _a;
	const struct p54_rssi_db_entry *entry_b = _b;

	return entry_a->freq - entry_b->freq;
}
/*
 * Attach the static rate table matching @band to @band_entry.
 * Returns 0 on success or -EINVAL for an unknown band.
 */
static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
				  struct ieee80211_supported_band *band_entry,
				  enum ieee80211_band band)
{
	/* TODO: generate rate array dynamically */
	if (band == IEEE80211_BAND_2GHZ) {
		band_entry->bitrates = p54_bgrates;
		band_entry->n_bitrates = ARRAY_SIZE(p54_bgrates);
	} else if (band == IEEE80211_BAND_5GHZ) {
		band_entry->bitrates = p54_arates;
		band_entry->n_bitrates = ARRAY_SIZE(p54_arates);
	} else {
		return -EINVAL;
	}

	return 0;
}
/*
 * Build the ieee80211_supported_band descriptor for one band from the
 * merged channel @list.  Only channels carrying all three EEPROM data
 * sets (CHAN_HAS_ALL) are accepted; each accepted channel is also wired
 * to its slot in priv->survey, indexed by the running *@chan_num.
 *
 * Returns 0 on success, -EINVAL for an empty list, -ENOMEM on
 * allocation failure, -ENODATA if no channel in the band was usable.
 */
static int p54_generate_band(struct ieee80211_hw *dev,
			     struct p54_channel_list *list,
			     unsigned int *chan_num,
			     enum ieee80211_band band)
{
	struct p54_common *priv = dev->priv;
	struct ieee80211_supported_band *tmp, *old;
	unsigned int i, j;
	int ret = -ENOMEM;

	if ((!list->entries) || (!list->band_channel_num[band]))
		return -EINVAL;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		goto err_out;

	tmp->channels = kzalloc(sizeof(struct ieee80211_channel) *
				list->band_channel_num[band], GFP_KERNEL);
	if (!tmp->channels)
		goto err_out;

	ret = p54_fill_band_bitrates(dev, tmp, band);
	if (ret)
		goto err_out;

	/* i walks the master list, j counts channels accepted for this band */
	for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
			   (i < list->entries); i++) {
		struct p54_channel_entry *chan = &list->channels[i];

		if (chan->band != band)
			continue;

		if (chan->data != CHAN_HAS_ALL) {
			/* incomplete EEPROM data - skip this channel */
			wiphy_err(dev->wiphy, "%s%s%s is/are missing for "
				  "channel:%d [%d MHz].\n",
				  (chan->data & CHAN_HAS_CAL ? "" :
				   " [iqauto calibration data]"),
				  (chan->data & CHAN_HAS_LIMIT ? "" :
				   " [output power limits]"),
				  (chan->data & CHAN_HAS_CURVE ? "" :
				   " [curve data]"),
				  chan->index, chan->freq);
			continue;
		}

		tmp->channels[j].band = chan->band;
		tmp->channels[j].center_freq = chan->freq;
		/* global survey slot is shared across bands via *chan_num */
		priv->survey[*chan_num].channel = &tmp->channels[j];
		priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM |
			SURVEY_INFO_CHANNEL_TIME |
			SURVEY_INFO_CHANNEL_TIME_BUSY |
			SURVEY_INFO_CHANNEL_TIME_TX;
		tmp->channels[j].hw_value = (*chan_num);
		j++;
		(*chan_num)++;
	}

	if (j == 0) {
		wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n",
			  (band == IEEE80211_BAND_2GHZ) ? 2 : 5);

		ret = -ENODATA;
		goto err_out;
	}

	tmp->n_channels = j;

	/* swap in the new table, releasing any previously built one */
	old = priv->band_table[band];
	priv->band_table[band] = tmp;
	if (old) {
		kfree(old->channels);
		kfree(old);
	}

	return 0;

err_out:
	if (tmp) {
		kfree(tmp->channels);
		kfree(tmp);
	}

	return ret;
}
/*
 * Merge one data-set flag (CHAN_HAS_*) for the channel at @freq into
 * @list.  If the frequency is not known yet and there is room left, a
 * fresh entry is appended; frequencies outside both supported bands
 * are silently dropped.
 */
static void p54_update_channel_param(struct p54_channel_list *list,
				     u16 freq, u16 data)
{
	int band, i;

	/*
	 * usually all lists in the eeprom are mostly sorted.
	 * so it's very likely that the entry we are looking for
	 * is right at the end of the list
	 *
	 * Start at the last *populated* index (entries - 1).  The old
	 * code started at 'entries', which read one element past the
	 * populated region on every call and went out of bounds once
	 * the list was completely full (entries == max_entries).
	 */
	for (i = (int) list->entries - 1; i >= 0; i--) {
		if (freq == list->channels[i].freq) {
			list->channels[i].data |= data;
			break;
		}
	}

	/* i < 0 <=> the frequency was not found above */
	if ((i < 0) && (list->entries < list->max_entries)) {
		/* entry does not exist yet. Initialize a new one. */
		band = p54_get_band_from_freq(freq);

		/*
		 * filter out frequencies which don't belong into
		 * any supported band.
		 */
		if (band < 0)
			return;

		i = list->entries++;
		list->band_channel_num[band]++;

		list->channels[i].freq = freq;
		list->channels[i].data = data;
		list->channels[i].band = band;
		list->channels[i].index = ieee80211_frequency_to_channel(freq);
		/* TODO: parse output_limit and fill max_power */
	}
}
/*
 * Merge the three per-channel EEPROM databases (iq autocal, output
 * limits, curve data) into one sorted channel list, then generate the
 * per-band channel tables and the survey array from it.
 *
 * Callers must have populated priv->iq_autocal, priv->output_limit and
 * priv->curve_data beforehand (enforced in p54_parse_eeprom).
 * Returns 0 on success or a negative errno; on failure the survey
 * array is released again.
 */
static int p54_generate_channel_lists(struct ieee80211_hw *dev)
{
	struct p54_common *priv = dev->priv;
	struct p54_channel_list *list;
	unsigned int i, j, k, max_channel_num;
	int ret = 0;
	u16 freq;

	/* mismatched database sizes usually indicate a damaged EEPROM */
	if ((priv->iq_autocal_len != priv->curve_data->entries) ||
	    (priv->iq_autocal_len != priv->output_limit->entries))
		wiphy_err(dev->wiphy,
			  "Unsupported or damaged EEPROM detected. "
			  "You may not be able to use all channels.\n");

	/* size everything for the largest of the three databases */
	max_channel_num = max_t(unsigned int, priv->output_limit->entries,
				priv->iq_autocal_len);
	max_channel_num = max_t(unsigned int, max_channel_num,
				priv->curve_data->entries);

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		ret = -ENOMEM;
		goto free;
	}
	priv->chan_num = max_channel_num;
	priv->survey = kzalloc(sizeof(struct survey_info) * max_channel_num,
			       GFP_KERNEL);
	if (!priv->survey) {
		ret = -ENOMEM;
		goto free;
	}

	list->max_entries = max_channel_num;
	list->channels = kzalloc(sizeof(struct p54_channel_entry) *
				 max_channel_num, GFP_KERNEL);
	if (!list->channels) {
		ret = -ENOMEM;
		goto free;
	}

	/* fold each database's frequencies into the shared list */
	for (i = 0; i < max_channel_num; i++) {
		if (i < priv->iq_autocal_len) {
			freq = le16_to_cpu(priv->iq_autocal[i].freq);
			p54_update_channel_param(list, freq, CHAN_HAS_CAL);
		}

		if (i < priv->output_limit->entries) {
			freq = le16_to_cpup((__le16 *) (i *
					    priv->output_limit->entry_size +
					    priv->output_limit->offset +
					    priv->output_limit->data));
			p54_update_channel_param(list, freq, CHAN_HAS_LIMIT);
		}

		if (i < priv->curve_data->entries) {
			freq = le16_to_cpup((__le16 *) (i *
					    priv->curve_data->entry_size +
					    priv->curve_data->offset +
					    priv->curve_data->data));
			p54_update_channel_param(list, freq, CHAN_HAS_CURVE);
		}
	}

	/* sort the channel list by frequency */
	sort(list->channels, list->entries, sizeof(struct p54_channel_entry),
	     p54_compare_channels, NULL);

	/* k: global channel counter shared by both bands; j: usable bands */
	k = 0;
	for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) {
		if (p54_generate_band(dev, list, &k, i) == 0)
			j++;
	}
	if (j == 0) {
		/* no useable band available. */
		ret = -EINVAL;
	}

free:
	if (list) {
		kfree(list->channels);
		kfree(list);
	}
	if (ret) {
		kfree(priv->survey);
		priv->survey = NULL;
	}

	return ret;
}
/*
 * Convert a revision-0 PA curve database into the common in-memory
 * layout (priv->curve_data).  Rev-0 samples carry a single 'pcv'
 * value, so the per-modulation points are synthesized by subtracting
 * fixed offsets (clamped at zero via SUB()).
 *
 * NOTE(review): kmalloc (not kzalloc) is fine here because the loop
 * below writes every byte of the converted data region.
 */
static int p54_convert_rev0(struct ieee80211_hw *dev,
			    struct pda_pa_curve_data *curve_data)
{
	struct p54_common *priv = dev->priv;
	struct p54_pa_curve_data_sample *dst;
	struct pda_pa_curve_data_sample_rev0 *src;
	/* per channel: 2-byte frequency + points_per_channel samples */
	size_t cd_len = sizeof(*curve_data) +
		(curve_data->points_per_channel*sizeof(*dst) + 2) *
		 curve_data->channels;
	unsigned int i, j;
	void *source, *target;

	priv->curve_data = kmalloc(sizeof(*priv->curve_data) + cd_len,
				   GFP_KERNEL);
	if (!priv->curve_data)
		return -ENOMEM;

	priv->curve_data->entries = curve_data->channels;
	priv->curve_data->entry_size = sizeof(__le16) +
		sizeof(*dst) * curve_data->points_per_channel;
	priv->curve_data->offset = offsetof(struct pda_pa_curve_data, data);
	priv->curve_data->len = cd_len;
	memcpy(priv->curve_data->data, curve_data, sizeof(*curve_data));
	source = curve_data->data;
	target = ((struct pda_pa_curve_data *) priv->curve_data->data)->data;
	for (i = 0; i < curve_data->channels; i++) {
		__le16 *freq = source;
		source += sizeof(__le16);
		*((__le16 *)target) = *freq;
		target += sizeof(__le16);
		for (j = 0; j < curve_data->points_per_channel; j++) {
			dst = target;
			src = source;

			dst->rf_power = src->rf_power;
			dst->pa_detector = src->pa_detector;
			dst->data_64qam = src->pcv;
			/* "invent" the points for the other modulations */
#define SUB(x, y) (u8)(((x) - (y)) > (x) ? 0 : (x) - (y))
			dst->data_16qam = SUB(src->pcv, 12);
			dst->data_qpsk = SUB(dst->data_16qam, 12);
			dst->data_bpsk = SUB(dst->data_qpsk, 12);
			dst->data_barker = SUB(dst->data_bpsk, 14);
#undef SUB
			target += sizeof(*dst);
			source += sizeof(*src);
		}
	}

	return 0;
}
/*
 * Convert a revision-1 PA curve database into the common in-memory
 * layout (priv->curve_data).  Rev-1 samples already contain all
 * per-modulation points, so each sample is copied verbatim; the source
 * stream carries one extra padding byte per channel (skipped by the
 * trailing source++).
 */
static int p54_convert_rev1(struct ieee80211_hw *dev,
			    struct pda_pa_curve_data *curve_data)
{
	struct p54_common *priv = dev->priv;
	struct p54_pa_curve_data_sample *dst;
	struct pda_pa_curve_data_sample_rev1 *src;
	/* per channel: 2-byte frequency + points_per_channel samples */
	size_t cd_len = sizeof(*curve_data) +
		(curve_data->points_per_channel*sizeof(*dst) + 2) *
		 curve_data->channels;
	unsigned int i, j;
	void *source, *target;

	priv->curve_data = kzalloc(cd_len + sizeof(*priv->curve_data),
				   GFP_KERNEL);
	if (!priv->curve_data)
		return -ENOMEM;

	priv->curve_data->entries = curve_data->channels;
	priv->curve_data->entry_size = sizeof(__le16) +
		sizeof(*dst) * curve_data->points_per_channel;
	priv->curve_data->offset = offsetof(struct pda_pa_curve_data, data);
	priv->curve_data->len = cd_len;
	memcpy(priv->curve_data->data, curve_data, sizeof(*curve_data));
	source = curve_data->data;
	target = ((struct pda_pa_curve_data *) priv->curve_data->data)->data;
	for (i = 0; i < curve_data->channels; i++) {
		__le16 *freq = source;
		source += sizeof(__le16);
		*((__le16 *)target) = *freq;
		target += sizeof(__le16);
		for (j = 0; j < curve_data->points_per_channel; j++) {
			memcpy(target, source, sizeof(*src));

			target += sizeof(*dst);
			source += sizeof(*src);
		}
		/* skip the per-channel padding byte in the source stream */
		source++;
	}

	return 0;
}
/* RF frontend names, indexed by priv->rxhw (synth & PDR_SYNTH_FRONTEND_MASK) */
static const char *p54_rf_chips[] = { "INVALID-0", "Duette3", "Duette2",
	"Frisbee", "Xbow", "Longbow", "INVALID-6", "INVALID-7" };
/*
 * Parse an rssi calibration PDR (@type is one of the three
 * PDR_RSSI_LINEAR_APPROXIMATION* codes) into priv->rssi_db.  The
 * non-extended formats carry one (single-band) or two (dual-band)
 * fixed entries; the extended format is a variable-length table with
 * an explicit frequency per entry.  Returns 0, -ENOMEM, or -EINVAL
 * for malformed data.
 */
static int p54_parse_rssical(struct ieee80211_hw *dev,
			     u8 *data, int len, u16 type)
{
	struct p54_common *priv = dev->priv;
	struct p54_rssi_db_entry *entry;
	size_t db_len, entries;
	int offset = 0, i;

	if (type != PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
		entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2;
		if (len != sizeof(struct pda_rssi_cal_entry) * entries) {
			wiphy_err(dev->wiphy, "rssical size mismatch.\n");
			goto err_data;
		}
	} else {
		/*
		 * Some devices (Dell 1450 USB, Xbow 5GHz card, etc...)
		 * have an empty two byte header.
		 */
		if (*((__le16 *)&data[offset]) == cpu_to_le16(0))
			offset += 2;

		entries = (len - offset) /
			sizeof(struct pda_rssi_cal_ext_entry);

		/* NOTE(review): entries is size_t, so "<= 0" only
		 * catches the == 0 case; negative lengths cannot occur
		 * because the modulo test above runs first. */
		if ((len - offset) % sizeof(struct pda_rssi_cal_ext_entry) ||
		    entries <= 0) {
			wiphy_err(dev->wiphy, "invalid rssi database.\n");
			goto err_data;
		}
	}

	db_len = sizeof(*entry) * entries;
	priv->rssi_db = kzalloc(db_len + sizeof(*priv->rssi_db), GFP_KERNEL);
	if (!priv->rssi_db)
		return -ENOMEM;

	priv->rssi_db->offset = 0;
	priv->rssi_db->entries = entries;
	priv->rssi_db->entry_size = sizeof(*entry);
	priv->rssi_db->len = db_len;

	entry = (void *)((unsigned long)priv->rssi_db->data + priv->rssi_db->offset);
	if (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
		struct pda_rssi_cal_ext_entry *cal = (void *) &data[offset];

		for (i = 0; i < entries; i++) {
			entry[i].freq = le16_to_cpu(cal[i].freq);
			entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
			entry[i].add = (s16) le16_to_cpu(cal[i].add);
		}
	} else {
		struct pda_rssi_cal_entry *cal = (void *) &data[offset];

		for (i = 0; i < entries; i++) {
			u16 freq = 0;
			/* fixed representative frequency per band index */
			switch (i) {
			case IEEE80211_BAND_2GHZ:
				freq = 2437;
				break;
			case IEEE80211_BAND_5GHZ:
				freq = 5240;
				break;
			}

			entry[i].freq = freq;
			entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
			entry[i].add = (s16) le16_to_cpu(cal[i].add);
		}
	}

	/* sort the list by channel frequency */
	sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL);
	return 0;

err_data:
	wiphy_err(dev->wiphy,
		  "rssi calibration data packing type:(%x) len:%d.\n",
		  type, len);

	print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, data, len);

	wiphy_err(dev->wiphy, "please report this issue.\n");
	return -EINVAL;
}
/*
 * Look up the rssi conversion entry whose frequency is closest to
 * @freq within the same band.  Falls back to p54_rssi_default when no
 * database was parsed or no same-band entry exists.  The early break
 * relies on the database being frequency-sorted (see p54_parse_rssical).
 */
struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *priv, const u16 freq)
{
	struct p54_rssi_db_entry *entry;
	int i, found = -1;

	if (!priv->rssi_db)
		return &p54_rssi_default;

	entry = (void *)(priv->rssi_db->data + priv->rssi_db->offset);
	for (i = 0; i < priv->rssi_db->entries; i++) {
		if (!same_band(freq, entry[i].freq))
			continue;

		if (found == -1) {
			found = i;
			continue;
		}

		/* nearest match */
		if (abs(freq - entry[i].freq) <
		    abs(freq - entry[found].freq)) {
			found = i;
			continue;
		} else {
			break;
		}
	}

	return found < 0 ? &p54_rssi_default : &entry[found];
}
/*
 * Parse the PDR_DEFAULT_COUNTRY record and, when it carries an ISO
 * alpha2 code, feed it to the regulatory core as a hint.  Malformed
 * records are logged and ignored (best effort - no error returned).
 */
static void p54_parse_default_country(struct ieee80211_hw *dev,
				      void *data, int len)
{
	struct pda_country *country;

	if (len != sizeof(*country)) {
		wiphy_err(dev->wiphy,
			  "found possible invalid default country eeprom entry. (entry size: %d)\n",
			  len);

		print_hex_dump_bytes("country:", DUMP_PREFIX_NONE,
				     data, len);

		wiphy_err(dev->wiphy, "please report this issue.\n");
		return;
	}

	country = (struct pda_country *) data;
	if (country->flags == PDR_COUNTRY_CERT_CODE_PSEUDO)
		regulatory_hint(dev->wiphy, country->alpha2);
	else {
		/* TODO:
		 * write a shared/common function that converts
		 * "Regulatory domain codes" (802.11-2007 14.8.2.2)
		 * into ISO/IEC 3166-1 alpha2 for regulatory_hint.
		 */
	}
}
/*
 * Parse the legacy output-power-limit PDR into priv->output_limit.
 * Layout: data[0] = revision (must be 0), data[1] = entry count,
 * followed by that many pda_channel_output_limit records.
 * Returns 0, -EINVAL on malformed input, or -ENOMEM.
 */
static int p54_convert_output_limits(struct ieee80211_hw *dev,
				     u8 *data, size_t len)
{
	struct p54_common *priv = dev->priv;

	if (len < 2)
		return -EINVAL;

	if (data[0] != 0) {
		wiphy_err(dev->wiphy, "unknown output power db revision:%x\n",
			  data[0]);
		return -EINVAL;
	}

	/* make sure the announced entry count fits in the record */
	if (2 + data[1] * sizeof(struct pda_channel_output_limit) > len)
		return -EINVAL;

	priv->output_limit = kmalloc(data[1] *
		sizeof(struct pda_channel_output_limit) +
		sizeof(*priv->output_limit), GFP_KERNEL);

	if (!priv->output_limit)
		return -ENOMEM;

	priv->output_limit->offset = 0;
	priv->output_limit->entries = data[1];
	priv->output_limit->entry_size =
		sizeof(struct pda_channel_output_limit);
	priv->output_limit->len = priv->output_limit->entry_size *
		priv->output_limit->entries +
		priv->output_limit->offset;

	memcpy(priv->output_limit->data, &data[2],
	       data[1] * sizeof(struct pda_channel_output_limit));

	return 0;
}
/*
 * Convert a "custom wrapper" EEPROM database into a freshly allocated
 * p54_cal_database.  Returns NULL when the wrapper's announced sizes
 * are inconsistent with @total_len or allocation fails; the caller
 * owns (and must kfree) the returned database.
 */
static struct p54_cal_database *p54_convert_db(struct pda_custom_wrapper *src,
					       size_t total_len)
{
	struct p54_cal_database *db;
	size_t len = le16_to_cpu(src->len);
	size_t entries = le16_to_cpu(src->entries);
	size_t entry_size = le16_to_cpu(src->entry_size);
	size_t offset = le16_to_cpu(src->offset);

	/* the entry table plus offset must fill the payload exactly,
	 * and header plus payload must match the record's total size */
	if (entries * entry_size + offset != len ||
	    sizeof(*src) + len != total_len)
		return NULL;

	db = kmalloc(sizeof(*db) + len, GFP_KERNEL);
	if (!db)
		return NULL;

	db->entries = entries;
	db->entry_size = entry_size;
	db->offset = offset;
	db->len = len;

	memcpy(db->data, src->data, len);
	return db;
}
/*
 * Walk the EEPROM's PDA (TLV-style pda_entry records), dispatching on
 * each record code, until the PDR_END record validates the running
 * CRC-16/CCITT.  On success the parsed databases live in priv->* and
 * the channel tables are generated; on any failure all partially
 * parsed state is torn down and a negative errno is returned.
 */
int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
{
	struct p54_common *priv = dev->priv;
	struct eeprom_pda_wrap *wrap;
	struct pda_entry *entry;
	unsigned int data_len, entry_len;
	void *tmp;
	int err;
	u8 *end = (u8 *)eeprom + len;
	u16 synth = 0;
	u16 crc16 = ~0;

	wrap = (struct eeprom_pda_wrap *) eeprom;
	/* the PDA starts right behind the wrapper's payload */
	entry = (void *)wrap->data + le16_to_cpu(wrap->len);

	/* verify that at least the entry length/code fits */
	while ((u8 *)entry <= end - sizeof(*entry)) {
		entry_len = le16_to_cpu(entry->len);
		/* entry->len counts 16-bit words including the code word */
		data_len = ((entry_len - 1) << 1);

		/* abort if entry exceeds whole structure */
		if ((u8 *)entry + sizeof(*entry) + data_len > end)
			break;

		switch (le16_to_cpu(entry->code)) {
		case PDR_MAC_ADDRESS:
			if (data_len != ETH_ALEN)
				break;
			SET_IEEE80211_PERM_ADDR(dev, entry->data);
			break;
		case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS:
			/* first record wins; custom variant may come later */
			if (priv->output_limit)
				break;
			err = p54_convert_output_limits(dev, entry->data,
							data_len);
			if (err)
				goto err;
			break;
		case PDR_PRISM_PA_CAL_CURVE_DATA: {
			struct pda_pa_curve_data *curve_data =
				(struct pda_pa_curve_data *)entry->data;
			if (data_len < sizeof(*curve_data)) {
				err = -EINVAL;
				goto err;
			}

			switch (curve_data->cal_method_rev) {
			case 0:
				err = p54_convert_rev0(dev, curve_data);
				break;
			case 1:
				err = p54_convert_rev1(dev, curve_data);
				break;
			default:
				wiphy_err(dev->wiphy,
					  "unknown curve data revision %d\n",
					  curve_data->cal_method_rev);
				err = -ENODEV;
				break;
			}
			if (err)
				goto err;
			}
			break;
		case PDR_PRISM_ZIF_TX_IQ_CALIBRATION:
			priv->iq_autocal = kmemdup(entry->data, data_len,
						   GFP_KERNEL);
			if (!priv->iq_autocal) {
				err = -ENOMEM;
				goto err;
			}

			priv->iq_autocal_len = data_len / sizeof(struct pda_iq_autocal_entry);
			break;
		case PDR_DEFAULT_COUNTRY:
			p54_parse_default_country(dev, entry->data, data_len);
			break;
		case PDR_INTERFACE_LIST:
			/* scan the interface list for the ISL39000 variant */
			tmp = entry->data;
			while ((u8 *)tmp < entry->data + data_len) {
				struct exp_if *exp_if = tmp;
				if (exp_if->if_id == cpu_to_le16(IF_ID_ISL39000))
					synth = le16_to_cpu(exp_if->variant);

				tmp += sizeof(*exp_if);
			}
			break;
		case PDR_HARDWARE_PLATFORM_COMPONENT_ID:
			if (data_len < 2)
				break;
			priv->version = *(u8 *)(entry->data + 1);
			break;
		case PDR_RSSI_LINEAR_APPROXIMATION:
		case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND:
		case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED:
			err = p54_parse_rssical(dev, entry->data, data_len,
						le16_to_cpu(entry->code));
			if (err)
				goto err;
			break;
		case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2: {
			struct pda_custom_wrapper *pda = (void *) entry->data;
			__le16 *src;
			u16 *dst;
			int i;

			if (priv->rssi_db || data_len < sizeof(*pda))
				break;

			priv->rssi_db = p54_convert_db(pda, data_len);
			if (!priv->rssi_db)
				break;

			/* in-place fixup: convert the le16 payload to cpu order */
			src = (void *) priv->rssi_db->data;
			dst = (void *) priv->rssi_db->data;
			for (i = 0; i < priv->rssi_db->entries; i++)
				*(dst++) = (s16) le16_to_cpu(*(src++));

			}
			break;
		case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: {
			struct pda_custom_wrapper *pda = (void *) entry->data;
			if (priv->output_limit || data_len < sizeof(*pda))
				break;
			priv->output_limit = p54_convert_db(pda, data_len);
			}
			break;
		case PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM: {
			struct pda_custom_wrapper *pda = (void *) entry->data;
			if (priv->curve_data || data_len < sizeof(*pda))
				break;
			priv->curve_data = p54_convert_db(pda, data_len);
			}
			break;
		case PDR_END:
			/* fold in the END header and compare with stored CRC */
			crc16 = ~crc_ccitt(crc16, (u8 *) entry, sizeof(*entry));
			if (crc16 != le16_to_cpup((__le16 *)entry->data)) {
				wiphy_err(dev->wiphy, "eeprom failed checksum "
					 "test!\n");
				err = -ENOMSG;
				goto err;
			} else {
				goto good_eeprom;
			}
			break;
		default:
			break;
		}

		/* accumulate the CRC over the whole record, advance */
		crc16 = crc_ccitt(crc16, (u8 *)entry, (entry_len + 1) * 2);
		entry = (void *)entry + (entry_len + 1) * 2;
	}

	wiphy_err(dev->wiphy, "unexpected end of eeprom data.\n");
	err = -ENODATA;
	goto err;

good_eeprom:
	if (!synth || !priv->iq_autocal || !priv->output_limit ||
	    !priv->curve_data) {
		wiphy_err(dev->wiphy,
			  "not all required entries found in eeprom!\n");
		err = -EINVAL;
		goto err;
	}

	err = p54_generate_channel_lists(dev);
	if (err)
		goto err;

	priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
	if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
		p54_init_xbow_synth(priv);
	if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
		dev->wiphy->bands[IEEE80211_BAND_2GHZ] =
			priv->band_table[IEEE80211_BAND_2GHZ];
	if (!(synth & PDR_SYNTH_5_GHZ_DISABLED))
		dev->wiphy->bands[IEEE80211_BAND_5GHZ] =
			priv->band_table[IEEE80211_BAND_5GHZ];
	if ((synth & PDR_SYNTH_RX_DIV_MASK) == PDR_SYNTH_RX_DIV_SUPPORTED)
		priv->rx_diversity_mask = 3;
	if ((synth & PDR_SYNTH_TX_DIV_MASK) == PDR_SYNTH_TX_DIV_SUPPORTED)
		priv->tx_diversity_mask = 3;

	if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
		u8 perm_addr[ETH_ALEN];

		wiphy_warn(dev->wiphy,
			   "Invalid hwaddr! Using randomly generated MAC addr\n");
		random_ether_addr(perm_addr);
		SET_IEEE80211_PERM_ADDR(dev, perm_addr);
	}

	priv->cur_rssi = &p54_rssi_default;

	wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n",
		   dev->wiphy->perm_addr, priv->version,
		   p54_rf_chips[priv->rxhw]);

	return 0;

err:
	/* release everything parsed so far and reset the pointers */
	kfree(priv->iq_autocal);
	kfree(priv->output_limit);
	kfree(priv->curve_data);
	kfree(priv->rssi_db);
	kfree(priv->survey);
	priv->iq_autocal = NULL;
	priv->output_limit = NULL;
	priv->curve_data = NULL;
	priv->rssi_db = NULL;
	priv->survey = NULL;

	wiphy_err(dev->wiphy, "eeprom parse failed!\n");
	return err;
}
EXPORT_SYMBOL_GPL(p54_parse_eeprom);
/*
 * Download the whole EEPROM image (0x2020 bytes) from the device in
 * firmware-limited chunks, then hand it to p54_parse_eeprom.
 * Returns 0 on success or a negative errno.
 */
int p54_read_eeprom(struct ieee80211_hw *dev)
{
	struct p54_common *priv = dev->priv;
	size_t eeprom_size = 0x2020, offset = 0, blocksize, maxblocksize;
	int ret = -ENOMEM;
	void *eeprom;

	/* newer firmware needs more per-block header room */
	maxblocksize = EEPROM_READBACK_LEN;
	if (priv->fw_var >= 0x509)
		maxblocksize -= 0xc;
	else
		maxblocksize -= 0x4;

	eeprom = kzalloc(eeprom_size, GFP_KERNEL);
	if (unlikely(!eeprom))
		goto free;

	while (eeprom_size) {
		blocksize = min(eeprom_size, maxblocksize);
		ret = p54_download_eeprom(priv, (void *) (eeprom + offset),
					  offset, blocksize);
		if (unlikely(ret))
			goto free;
		offset += blocksize;
		eeprom_size -= blocksize;
	}

	ret = p54_parse_eeprom(dev, eeprom, offset);
free:
	kfree(eeprom);
	return ret;
}
| gpl-2.0 |
andreya108/bindu-kernel-base | drivers/ata/pata_ali.c | 7490 | 17902 | /*
* pata_ali.c - ALI 15x3 PATA for new ATA layer
* (C) 2005 Red Hat Inc
*
* based in part upon
* linux/drivers/ide/pci/alim15x3.c Version 0.17 2003/01/02
*
* Copyright (C) 1998-2000 Michel Aubry, Maintainer
* Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
* Copyright (C) 1999-2000 CJ, cjtsai@ali.com.tw, Maintainer
*
* Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org)
* May be copied or modified under the terms of the GNU General Public License
* Copyright (C) 2002 Alan Cox <alan@redhat.com>
* ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
*
* Documentation
* Chipset documentation available under NDA only
*
* TODO/CHECK
* Cannot have ATAPI on both master & slave for rev < c2 (???) but
* otherwise should do atapi DMA (For now for old we do PIO only for
* ATAPI)
* Review Sunblade workaround.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME "pata_ali"
#define DRV_VERSION "0.7.8"
/* ATAPI DMA is off by default for reliability; see ali_check_atapi_dma() */
static int ali_atapi_dma = 0;
module_param_named(atapi_dma, ali_atapi_dma, int, 0644);
MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)");

/* cached ALi 1533 ISA bridge, used by ali_c2_c3_postreset()
 * NOTE(review): appears to be set up elsewhere in the file (probe path
 * not visible here) - confirm before relying on it */
static struct pci_dev *ali_isa_bridge;
/*
* Cable special cases
*/
/* systems known (via DMI) to use short cables without detect logic */
static const struct dmi_system_id cable_dmi_table[] = {
	{
		.ident = "HP Pavilion N5430",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
			DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
		},
	},
	{
		.ident = "Toshiba Satellite S1800-814",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"),
		},
	},
	{ }
};
/*
 * Report whether this board is a known quirk case whose cable detect
 * logic must be bypassed.  Returns 1 to override, 0 otherwise.
 */
static int ali_cable_override(struct pci_dev *pdev)
{
	/* Fujitsu P2000 */
	if (pdev->subsystem_vendor == 0x10CF &&
	    pdev->subsystem_device == 0x10AF)
		return 1;

	/* Mitac 8317 (Winbook-A) and relatives */
	if (pdev->subsystem_vendor == 0x1071 &&
	    pdev->subsystem_device == 0x8317)
		return 1;

	/* Systems by DMI */
	return dmi_check_system(cable_dmi_table) ? 1 : 0;
}
/**
 *	ali_c2_cable_detect	-	cable detection
 *	@ap: ATA port
 *
 *	Perform cable detection for C2 and later revisions by reading
 *	the host-view detect bits, honouring quirky boards that lack
 *	the detect logic entirely.
 */

static int ali_c2_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 detect;

	/* Certain laptops use short but suitable cables and don't
	   implement the detect logic */

	if (ali_cable_override(pdev))
		return ATA_CBL_PATA40_SHORT;

	/* Host view cable detect 0x4A bit 0 primary bit 1 secondary
	   Bit set for 40 pin */
	pci_read_config_byte(pdev, 0x4A, &detect);
	return (detect & (1 << ap->port_no)) ? ATA_CBL_PATA40
					     : ATA_CBL_PATA80;
}
/**
 *	ali_20_filter	-	filter for earlier ALI DMA
 *	@adev: attached device
 *	@mask: candidate transfer mode mask
 *
 *	Ensure that we do not do DMA on CD devices. We may be able to
 *	fix that later on. Also ensure we do not do UDMA on WDC drives
 */

static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
{
	char model_num[ATA_ID_PROD_LEN + 1];
	/* No DMA on anything but a disk for now */
	if (adev->class != ATA_DEV_ATA)
		mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	if (strstr(model_num, "WDC"))
		return mask &= ~ATA_MASK_UDMA;
	return mask;
}
/**
 *	ali_fifo_control	-	FIFO manager
 *	@ap: ALi channel to control
 *	@adev: device for FIFO control
 *	@on: FIFO nibble value to program (callers pass 0x00 to disable,
 *	     0x05 or 0x08 to enable - not a plain boolean)
 *
 *	Enable or disable the FIFO on a given device. Because of the way the
 *	ALi FIFO works it provides a boost on ATA disk but can be confused by
 *	ATAPI and we must therefore manage it.
 */

static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int on)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int pio_fifo = 0x54 + ap->port_no;
	u8 fifo;
	int shift = 4 * adev->devno;

	/* ATA - FIFO on set nibble to 0x05, ATAPI - FIFO off, set nibble to
	   0x00. Not all the docs agree but the behaviour we now use is the
	   one stated in the BIOS Programming Guide */

	pci_read_config_byte(pdev, pio_fifo, &fifo);
	fifo &= ~(0x0F << shift);
	fifo |= (on << shift);
	pci_write_config_byte(pdev, pio_fifo, fifo);
}
/**
 *	ali_program_modes	-	load mode registers
 *	@ap: ALi channel to load
 *	@adev: Device the timing is for
 *	@t: timing data (clamped in place; may be NULL for UDMA-only setup)
 *	@ultra: UDMA timing nibble or zero for off
 *
 *	Loads the timing registers for cmd/data and disable UDMA if
 *	ultra is zero. If ultra is set then load and enable the UDMA
 *	timing but do not touch the command/data timing.
 */

static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, struct ata_timing *t, u8 ultra)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int cas = 0x58 + 4 * ap->port_no;	/* Command timing */
	int cbt = 0x59 + 4 * ap->port_no;	/* Command timing */
	int drwt = 0x5A + 4 * ap->port_no + adev->devno; /* R/W timing */
	int udmat = 0x56 + ap->port_no;	/* UDMA timing */
	int shift = 4 * adev->devno;
	u8 udma;

	if (t != NULL) {
		/* clamp to the register fields' ranges; 8/16 encode as 0 */
		t->setup = clamp_val(t->setup, 1, 8) & 7;
		t->act8b = clamp_val(t->act8b, 1, 8) & 7;
		t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
		t->active = clamp_val(t->active, 1, 8) & 7;
		t->recover = clamp_val(t->recover, 1, 16) & 15;

		pci_write_config_byte(pdev, cas, t->setup);
		pci_write_config_byte(pdev, cbt, (t->act8b << 4) | t->rec8b);
		pci_write_config_byte(pdev, drwt, (t->active << 4) | t->recover);
	}

	/* Set up the UDMA enable */
	pci_read_config_byte(pdev, udmat, &udma);
	udma &= ~(0x0F << shift);
	udma |= ultra << shift;
	pci_write_config_byte(pdev, udmat, udma);
}
/**
 *	ali_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the ALi registers for PIO mode, merging the timing with
 *	the paired device's so the shared command timing suits both.
 */

static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);
	struct ata_timing t;
	unsigned long T =  1000000000 / 33333;	/* PCI clock based */

	ata_timing_compute(adev, adev->pio_mode, &t, T, 1);
	if (pair) {
		/* setup/8-bit timing is shared - merge with the pair's modes */
		struct ata_timing p;
		ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
		ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
		if (pair->dma_mode) {
			ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
			ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
		}
	}

	/* PIO FIFO is only permitted on ATA disk */
	if (adev->class != ATA_DEV_ATA)
		ali_fifo_control(ap, adev, 0x00);
	ali_program_modes(ap, adev, &t, 0);
	if (adev->class == ATA_DEV_ATA)
		ali_fifo_control(ap, adev, 0x05);
}
/**
 *	ali_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Program the ALi registers for DMA mode: UDMA modes load only the
 *	UDMA nibble (enabling the >=UDMA3 clock bit in 0x4B when needed),
 *	while MWDMA computes and merges full timings like PIO setup does.
 */

static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	/* UDMA register nibbles for modes UDMA0..UDMA6 */
	static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
	struct ata_device *pair = ata_dev_pair(adev);
	struct ata_timing t;
	unsigned long T =  1000000000 / 33333;	/* PCI clock based */
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (adev->class == ATA_DEV_ATA)
		ali_fifo_control(ap, adev, 0x08);

	if (adev->dma_mode >= XFER_UDMA_0) {
		ali_program_modes(ap, adev, NULL, udma_timing[adev->dma_mode - XFER_UDMA_0]);
		if (adev->dma_mode >= XFER_UDMA_3) {
			u8 reg4b;
			pci_read_config_byte(pdev, 0x4B, &reg4b);
			reg4b |= 1;
			pci_write_config_byte(pdev, 0x4B, reg4b);
		}
	} else {
		ata_timing_compute(adev, adev->dma_mode, &t, T, 1);
		if (pair) {
			/* shared timing fields - merge with the pair's modes */
			struct ata_timing p;
			ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
			ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
			if (pair->dma_mode) {
				ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
				ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
			}
		}
		ali_program_modes(ap, adev, &t, 0);
	}
}
/**
 *	ali_warn_atapi_dma	-	Warn about ATAPI DMA disablement
 *	@adev: Device
 *
 *	Whine about ATAPI DMA disablement if @adev is an ATAPI device.
 *	Can be used as ->dev_config.  Only prints during EH probe info
 *	reporting (ATA_EHI_PRINTINFO) and when the modparam is off.
 */

static void ali_warn_atapi_dma(struct ata_device *adev)
{
	struct ata_eh_context *ehc = &adev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;

	if (print_info && adev->class == ATA_DEV_ATAPI && !ali_atapi_dma) {
		ata_dev_warn(adev,
			     "WARNING: ATAPI DMA disabled for reliability issues.  It can be enabled\n");
		ata_dev_warn(adev,
			     "WARNING: via pata_ali.atapi_dma modparam or corresponding sysfs node.\n");
	}
}
/**
 *	ali_lock_sectors	-	Keep older devices to 255 sector mode
 *	@adev: Device
 *
 *	Called during the bus probe for each device that is found. We use
 *	this call to lock the sector count of the device to 255 or less on
 *	older ALi controllers. If we didn't do this then large I/O's would
 *	require LBA48 commands which the older ALi requires are issued by
 *	slower PIO methods
 */

static void ali_lock_sectors(struct ata_device *adev)
{
	adev->max_sectors = 255;
	/* also emit the ATAPI DMA warning on these older chips */
	ali_warn_atapi_dma(adev);
}
/**
 *	ali_check_atapi_dma	-	DMA check for most ALi controllers
 *	@qc: Queued command to check
 *
 *	Called to decide whether commands should be sent by DMA or PIO.
 *	Returns 0 to allow DMA, -EOPNOTSUPP to force PIO.
 */

static int ali_check_atapi_dma(struct ata_queued_cmd *qc)
{
	if (!ali_atapi_dma) {
		/* FIXME: pata_ali can't do ATAPI DMA reliably but the
		 * IDE alim15x3 driver can.  I tried lots of things
		 * but couldn't find what the actual difference was.
		 * If you got an idea, please write it to
		 * linux-ide@vger.kernel.org and cc htejun@gmail.com.
		 *
		 * Disable ATAPI DMA for now.
		 */
		return -EOPNOTSUPP;
	}

	/* If its not a media command, its not worth it */
	if (atapi_cmd_type(qc->cdb[0]) == ATAPI_MISC)
		return -EOPNOTSUPP;
	return 0;
}
/*
 * Post-reset hook for C2/C3 parts: if the companion ALi 1533 ISA
 * bridge was found, pulse the port's bus-signal bit in register 0x58
 * (tristate then re-enable) before the generic SFF postreset runs.
 */
static void ali_c2_c3_postreset(struct ata_link *link, unsigned int *classes)
{
	u8 r;
	int port_bit = 4 << link->ap->port_no;

	/* If our bridge is an ALI 1533 then do the extra work */
	if (ali_isa_bridge) {
		/* Tristate and re-enable the bus signals */
		pci_read_config_byte(ali_isa_bridge, 0x58, &r);
		r &= ~port_bit;
		pci_write_config_byte(ali_isa_bridge, 0x58, r);
		r |= port_bit;
		pci_write_config_byte(ali_isa_bridge, 0x58, r);
	}
	ata_sff_postreset(link, classes);
}
/* stock BMDMA SCSI host template - no driver-specific overrides */
static struct scsi_host_template ali_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
/*
 *	Port operations for PIO only ALi
 */

static struct ata_port_operations ali_early_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= ali_set_piomode,
	.sff_data_xfer  = ata_sff_data_xfer32,
};

/* shared base for all DMA-capable variants below */
static const struct ata_port_operations ali_dma_base_ops = {
	.inherits	= &ata_bmdma32_port_ops,
	.set_piomode	= ali_set_piomode,
	.set_dmamode	= ali_set_dmamode,
};

/*
 *	Port operations for DMA capable ALi without cable
 *	detect
 */
static struct ata_port_operations ali_20_port_ops = {
	.inherits	= &ali_dma_base_ops,
	.cable_detect	= ata_cable_40wire,
	.mode_filter	= ali_20_filter,
	.check_atapi_dma = ali_check_atapi_dma,
	.dev_config	= ali_lock_sectors,
};

/*
 *	Port operations for DMA capable ALi with cable detect
 */
static struct ata_port_operations ali_c2_port_ops = {
	.inherits	= &ali_dma_base_ops,
	.check_atapi_dma = ali_check_atapi_dma,
	.cable_detect	= ali_c2_cable_detect,
	.dev_config	= ali_lock_sectors,
	.postreset      = ali_c2_c3_postreset,
};

/*
 *	Port operations for DMA capable ALi with cable detect
 *	(as ali_c2_port_ops but without the postreset bridge pulse)
 */
static struct ata_port_operations ali_c4_port_ops = {
	.inherits	= &ali_dma_base_ops,
	.check_atapi_dma = ali_check_atapi_dma,
	.cable_detect	= ali_c2_cable_detect,
	.dev_config	= ali_lock_sectors,
};

/*
 *	Port operations for DMA capable ALi with cable detect and LBA48
 */
static struct ata_port_operations ali_c5_port_ops = {
	.inherits	= &ali_dma_base_ops,
	.check_atapi_dma = ali_check_atapi_dma,
	.dev_config	= ali_warn_atapi_dma,
	.cable_detect	= ali_c2_cable_detect,
};
/**
 * ali_init_chipset - chip setup function
 * @pdev: PCI device of ATA controller
 *
 * Perform the setup on the device that must be done both at boot
 * and at resume time. Programs revision-dependent bits in config
 * registers 0x53/0x4A/0x4B, then configures the companion ALi ISA
 * bridge (if the northbridge is also ALi) and clears BMDMA simplex.
 */
static void ali_init_chipset(struct pci_dev *pdev)
{
u8 tmp;
struct pci_dev *north;
/*
 * The chipset revision selects the driver operations and
 * mode data.
 */
if (pdev->revision <= 0x20) {
pci_read_config_byte(pdev, 0x53, &tmp);
tmp |= 0x03;
pci_write_config_byte(pdev, 0x53, tmp);
} else {
pci_read_config_byte(pdev, 0x4a, &tmp);
pci_write_config_byte(pdev, 0x4a, tmp | 0x20);
pci_read_config_byte(pdev, 0x4B, &tmp);
if (pdev->revision < 0xC2)
/* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
/* Clear CD-ROM DMA write bit */
tmp &= 0x7F;
/* Cable and UDMA */
if (pdev->revision >= 0xc2)
tmp |= 0x01;
pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
/*
 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
 * via 0x54/55.
 */
pci_read_config_byte(pdev, 0x53, &tmp);
if (pdev->revision >= 0xc7)
tmp |= 0x03;
else
tmp |= 0x01; /* CD_ROM enable for DMA */
pci_write_config_byte(pdev, 0x53, tmp);
}
/* Look at the host bridge (bus 0, devfn 0) to see if it is ALi */
north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
if (north && north->vendor == PCI_VENDOR_ID_AL && ali_isa_bridge) {
/* Configure the ALi bridge logic. For non ALi rely on BIOS.
   Set the south bridge enable bit */
pci_read_config_byte(ali_isa_bridge, 0x79, &tmp);
if (pdev->revision == 0xC2)
pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x04);
else if (pdev->revision > 0xC2 && pdev->revision < 0xC5)
pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x02);
}
/* pci_dev_put(NULL) is a no-op, so unconditional put is safe */
pci_dev_put(north);
ata_pci_bmdma_clear_simplex(pdev);
}
/**
 * ali_init_one - discovery callback
 * @pdev: PCI device ID
 * @id: PCI table info
 *
 * An ALi IDE interface has been discovered. Figure out what revision
 * and perform configuration work before handing it to the ATA layer.
 * The port-info table chosen here pairs with the ali_*_port_ops above.
 */
static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info_early = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.port_ops = &ali_early_port_ops
};
/* Revision 0x20 added DMA */
static const struct ata_port_info info_20 = {
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.port_ops = &ali_20_port_ops
};
/* Revision 0x20 with support logic added UDMA */
static const struct ata_port_info info_20_udma = {
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
.port_ops = &ali_20_port_ops
};
/* Revision 0xC2 adds UDMA66 */
static const struct ata_port_info info_c2 = {
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &ali_c2_port_ops
};
/* Revision 0xC3 is UDMA66 for now */
static const struct ata_port_info info_c3 = {
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
.port_ops = &ali_c2_port_ops
};
/* Revision 0xC4 is UDMA100 */
static const struct ata_port_info info_c4 = {
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
.port_ops = &ali_c4_port_ops
};
/* Revision 0xC5 is UDMA133 with LBA48 DMA */
static const struct ata_port_info info_c5 = {
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &ali_c5_port_ops
};
const struct ata_port_info *ppi[] = { NULL, NULL };
u8 tmp;
int rc;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/*
 * The chipset revision selects the driver operations and
 * mode data.
 */
if (pdev->revision < 0x20) {
ppi[0] = &info_early;
} else if (pdev->revision < 0xC2) {
ppi[0] = &info_20;
} else if (pdev->revision == 0xC2) {
ppi[0] = &info_c2;
} else if (pdev->revision == 0xC3) {
ppi[0] = &info_c3;
} else if (pdev->revision == 0xC4) {
ppi[0] = &info_c4;
} else
ppi[0] = &info_c5;
ali_init_chipset(pdev);
if (ali_isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) {
/* Are we paired with a UDMA capable chip */
pci_read_config_byte(ali_isa_bridge, 0x5E, &tmp);
if ((tmp & 0x1E) == 0x12)
ppi[0] = &info_20_udma;
}
/* PIO-only parts register as plain SFF, DMA parts as BMDMA */
if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask)
return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0);
else
return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0);
}
#ifdef CONFIG_PM
/* Resume callback: redo the boot-time chipset setup, then resume the host */
static int ali_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
ali_init_chipset(pdev);
ata_host_resume(host);
return 0;
}
#endif
/* PCI IDs this driver binds to: ALi M5228 and M5229 IDE controllers */
static const struct pci_device_id ali[] = {
{ PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), },
{ PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), },
{ },
};
static struct pci_driver ali_pci_driver = {
.name = DRV_NAME,
.id_table = ali,
.probe = ali_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = ali_reinit_one,
#endif
};
/*
 * Module init: cache a reference to the ALi M1533 ISA bridge (used by
 * several fixups above; may legitimately be NULL), then register the
 * PCI driver. On registration failure the bridge reference is dropped.
 */
static int __init ali_init(void)
{
	int rc;

	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
					PCI_DEVICE_ID_AL_M1533, NULL);

	rc = pci_register_driver(&ali_pci_driver);
	if (rc < 0)
		pci_dev_put(ali_isa_bridge);

	return rc;
}
/* Module exit: unregister first, then drop the cached bridge reference */
static void __exit ali_exit(void)
{
pci_unregister_driver(&ali_pci_driver);
pci_dev_put(ali_isa_bridge);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ALi PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ali);
MODULE_VERSION(DRV_VERSION);
module_init(ali_init);
module_exit(ali_exit);
| gpl-2.0 |
Crossbones/crossbones_kernel_tuna | drivers/scsi/sim710.c | 9026 | 10023 | /*
* sim710.c - Copyright (C) 1999 Richard Hirst <richard@sleepie.demon.co.uk>
*
*----------------------------------------------------------------------------
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*----------------------------------------------------------------------------
*
* MCA card detection code by Trent McNair.
* Fixes to not explicitly nul bss data from Xavier Bestel.
* Some multiboard fixes from Rolf Eike Beer.
* Auto probing of EISA config space from Trevor Hemsley.
*
* Rewritten to use 53c700.c by James.Bottomley@SteelEye.com
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mca.h>
#include <linux/eisa.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>
#include "53c700.h"
/* Must be enough for both EISA and MCA */
#define MAX_SLOTS 8
static __u8 __initdata id_array[MAX_SLOTS] = { [0 ... MAX_SLOTS-1] = 7 };
static char *sim710; /* command line passed by insmod */
MODULE_AUTHOR("Richard Hirst");
MODULE_DESCRIPTION("Simple NCR53C710 driver");
MODULE_LICENSE("GPL");
module_param(sim710, charp, 0);
#ifdef MODULE
#define ARG_SEP ' '
#else
#define ARG_SEP ','
#endif
/*
 * Parse the "sim710=" option string: a sequence of "slot:<n>" and
 * "id:<n>" pairs separated by ARG_SEP. A "slot:" selects which entry
 * of id_array a following "id:" writes; ids without a preceding valid
 * slot are rejected with a warning. Always returns 1 (option consumed).
 */
static __init int
param_setup(char *str)
{
char *pos = str, *next;
int slot = -1;
while(pos != NULL && (next = strchr(pos, ':')) != NULL) {
/* value after the ':' is parsed first, then matched against the key */
int val = (int)simple_strtoul(++next, NULL, 0);
if(!strncmp(pos, "slot:", 5))
slot = val;
else if(!strncmp(pos, "id:", 3)) {
if(slot == -1) {
printk(KERN_WARNING "sim710: Must specify slot for id parameter\n");
} else if(slot >= MAX_SLOTS) {
printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val);
} else {
id_array[slot] = val;
}
}
if((pos = strchr(pos, ARG_SEP)) != NULL)
pos++;
}
return 1;
}
__setup("sim710=", param_setup);
/* Host template passed to NCR_700_detect(); the 53c700 core fills the rest */
static struct scsi_host_template sim710_driver_template = {
.name = "LSI (Symbios) 710 MCA/EISA",
.proc_name = "sim710",
.this_id = 7,
.module = THIS_MODULE,
};
/*
 * Shared probe path for the MCA and EISA front-ends.
 *
 * Allocates the 53c700 host parameter block, reserves 64 bytes of I/O
 * space at base_addr, registers the chip with the 53c700 core and hooks
 * the (shared) interrupt. On any failure, resources acquired so far are
 * unwound via the goto-cleanup chain and -ENODEV is returned.
 */
static __devinit int
sim710_probe_common(struct device *dev, unsigned long base_addr,
int irq, int clock, int differential, int scsi_id)
{
struct Scsi_Host * host = NULL;
struct NCR_700_Host_Parameters *hostdata =
kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
printk(KERN_NOTICE "sim710: %s\n", dev_name(dev));
printk(KERN_NOTICE "sim710: irq = %d, clock = %d, base = 0x%lx, scsi_id = %d\n",
irq, clock, base_addr, scsi_id);
if(hostdata == NULL) {
printk(KERN_ERR "sim710: Failed to allocate host data\n");
goto out;
}
if(request_region(base_addr, 64, "sim710") == NULL) {
printk(KERN_ERR "sim710: Failed to reserve IO region 0x%lx\n",
base_addr);
goto out_free;
}
/* Fill in the three required pieces of hostdata */
hostdata->base = ioport_map(base_addr, 64);
hostdata->differential = differential;
hostdata->clock = clock;
hostdata->chip710 = 1;
hostdata->burst_length = 8;
/* and register the chip */
if((host = NCR_700_detect(&sim710_driver_template, hostdata, dev))
== NULL) {
printk(KERN_ERR "sim710: No host detected; card configuration problem?\n");
goto out_release;
}
host->this_id = scsi_id;
host->base = base_addr;
host->irq = irq;
if (request_irq(irq, NCR_700_intr, IRQF_SHARED, "sim710", host)) {
printk(KERN_ERR "sim710: request_irq failed\n");
goto out_put_host;
}
dev_set_drvdata(dev, host);
scsi_scan_host(host);
return 0;
out_put_host:
scsi_host_put(host);
out_release:
release_region(base_addr, 64);
out_free:
kfree(hostdata);
out:
return -ENODEV;
}
/*
 * Teardown counterpart of sim710_probe_common: detach the SCSI host,
 * release the 53c700 core state, then free the IRQ and I/O region.
 */
static __devexit int
sim710_device_remove(struct device *dev)
{
struct Scsi_Host *host = dev_get_drvdata(dev);
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)host->hostdata[0];
scsi_remove_host(host);
NCR_700_release(host);
kfree(hostdata);
free_irq(host->irq, host);
release_region(host->base, 64);
return 0;
}
#ifdef CONFIG_MCA
/* CARD ID 01BB and 01BA use the same pos values */
#define MCA_01BB_IO_PORTS { 0x0000, 0x0000, 0x0800, 0x0C00, 0x1000, 0x1400, \
0x1800, 0x1C00, 0x2000, 0x2400, 0x2800, \
0x2C00, 0x3000, 0x3400, 0x3800, 0x3C00, \
0x4000, 0x4400, 0x4800, 0x4C00, 0x5000 }
#define MCA_01BB_IRQS { 3, 5, 11, 14 }
/* CARD ID 004f */
#define MCA_004F_IO_PORTS { 0x0000, 0x0200, 0x0300, 0x0400, 0x0500, 0x0600 }
#define MCA_004F_IRQS { 5, 9, 14 }
static short sim710_mca_id_table[] = { 0x01bb, 0x01ba, 0x004f, 0};
/*
 * MCA probe: decode the card's POS registers into an I/O base, IRQ and
 * clock according to the per-card-ID tables documented below, claim the
 * MCA slot, and hand off to sim710_probe_common(). The SCSI id comes
 * from id_array[slot] (default 7, overridable via the sim710= option).
 */
static __init int
sim710_mca_probe(struct device *dev)
{
struct mca_device *mca_dev = to_mca_device(dev);
int slot = mca_dev->slot;
int pos[3];
unsigned int base;
int irq_vector;
short id = sim710_mca_id_table[mca_dev->index];
static int io_004f_by_pos[] = MCA_004F_IO_PORTS;
static int irq_004f_by_pos[] = MCA_004F_IRQS;
static int io_01bb_by_pos[] = MCA_01BB_IO_PORTS;
static int irq_01bb_by_pos[] = MCA_01BB_IRQS;
char *name;
int clock;
pos[0] = mca_device_read_stored_pos(mca_dev, 2);
pos[1] = mca_device_read_stored_pos(mca_dev, 3);
pos[2] = mca_device_read_stored_pos(mca_dev, 4);
/*
 * 01BB & 01BA port base by bits 7,6,5,4,3,2 in pos[2]
 *
 * 000000 <disabled> 001010 0x2800
 * 000001 <invalid> 001011 0x2C00
 * 000010 0x0800 001100 0x3000
 * 000011 0x0C00 001101 0x3400
 * 000100 0x1000 001110 0x3800
 * 000101 0x1400 001111 0x3C00
 * 000110 0x1800 010000 0x4000
 * 000111 0x1C00 010001 0x4400
 * 001000 0x2000 010010 0x4800
 * 001001 0x2400 010011 0x4C00
 * 010100 0x5000
 *
 * 00F4 port base by bits 3,2,1 in pos[0]
 *
 * 000 <disabled> 001 0x200
 * 010 0x300 011 0x400
 * 100 0x500 101 0x600
 *
 * 01BB & 01BA IRQ is specified in pos[0] bits 7 and 6:
 *
 * 00 3 10 11
 * 01 5 11 14
 *
 * 00F4 IRQ specified by bits 6,5,4 in pos[0]
 *
 * 100 5 101 9
 * 110 14
 */
if (id == 0x01bb || id == 0x01ba) {
base = io_01bb_by_pos[(pos[2] & 0xFC) >> 2];
irq_vector =
irq_01bb_by_pos[((pos[0] & 0xC0) >> 6)];
clock = 50;
if (id == 0x01bb)
name = "NCR 3360/3430 SCSI SubSystem";
else
name = "NCR Dual SIOP SCSI Host Adapter Board";
} else if ( id == 0x004f ) {
base = io_004f_by_pos[((pos[0] & 0x0E) >> 1)];
irq_vector =
irq_004f_by_pos[((pos[0] & 0x70) >> 4) - 4];
clock = 50;
name = "NCR 53c710 SCSI Host Adapter Board";
} else {
return -ENODEV;
}
mca_device_set_name(mca_dev, name);
mca_device_set_claim(mca_dev, 1);
/* let the MCA layer translate slot-relative resources to system ones */
base = mca_device_transform_ioport(mca_dev, base);
irq_vector = mca_device_transform_irq(mca_dev, irq_vector);
return sim710_probe_common(dev, base, irq_vector, clock,
0, id_array[slot]);
}
/* MCA bus binding: matches sim710_mca_id_table, probes via sim710_mca_probe */
static struct mca_driver sim710_mca_driver = {
.id_table = sim710_mca_id_table,
.driver = {
.name = "sim710",
.bus = &mca_bus_type,
.probe = sim710_mca_probe,
.remove = __devexit_p(sim710_device_remove),
},
};
#endif /* CONFIG_MCA */
#ifdef CONFIG_EISA
static struct eisa_device_id sim710_eisa_ids[] = {
{ "CPQ4410" },
{ "CPQ4411" },
{ "HWP0C80" },
{ "" }
};
MODULE_DEVICE_TABLE(eisa, sim710_eisa_ids);
/*
 * EISA probe: read the card's IRQ selection (and, for HP HWP0C80 boards,
 * the SCSI id) out of its EISA config I/O space, map the selection index
 * through the per-vendor IRQ table, and hand off to sim710_probe_common().
 */
static __init int
sim710_eisa_probe(struct device *dev)
{
struct eisa_device *edev = to_eisa_device(dev);
unsigned long io_addr = edev->base_addr;
char eisa_cpq_irqs[] = { 11, 14, 15, 10, 9, 0 };
char eisa_hwp_irqs[] = { 3, 4, 5, 7, 12, 10, 11, 0};
char *eisa_irqs;
unsigned char irq_index;
unsigned char irq, differential = 0, scsi_id = 7;
if(strcmp(edev->id.sig, "HWP0C80") == 0) {
__u8 val;
eisa_irqs = eisa_hwp_irqs;
irq_index = (inb(io_addr + 0xc85) & 0x7) - 1;
/* the id register holds a one-hot mask; ffs() recovers the id */
val = inb(io_addr + 0x4);
scsi_id = ffs(val) - 1;
if(scsi_id > 7 || (val & ~(1<<scsi_id)) != 0) {
printk(KERN_ERR "sim710.c, EISA card %s has incorrect scsi_id, setting to 7\n", dev_name(dev));
scsi_id = 7;
}
} else {
eisa_irqs = eisa_cpq_irqs;
irq_index = inb(io_addr + 0xc88) & 0x07;
}
/* the IRQ tables are NUL-terminated, so strlen() gives their length */
if(irq_index >= strlen(eisa_irqs)) {
printk("sim710.c: irq nasty\n");
return -ENODEV;
}
irq = eisa_irqs[irq_index];
return sim710_probe_common(dev, io_addr, irq, 50,
differential, scsi_id);
}
static struct eisa_driver sim710_eisa_driver = {
.id_table = sim710_eisa_ids,
.driver = {
.name = "sim710",
.probe = sim710_eisa_probe,
.remove = __devexit_p(sim710_device_remove),
},
};
#endif /* CONFIG_EISA */
/*
 * Module init: apply any insmod-time option string, then register the
 * MCA and/or EISA drivers. NOTE(review): when both CONFIG_MCA and
 * CONFIG_EISA are set, the EISA registration result overwrites the MCA
 * one in err; both are then ignored anyway (see the FIXME below).
 */
static int __init sim710_init(void)
{
int err = -ENODEV;
#ifdef MODULE
if (sim710)
param_setup(sim710);
#endif
#ifdef CONFIG_MCA
err = mca_register_driver(&sim710_mca_driver);
#endif
#ifdef CONFIG_EISA
err = eisa_driver_register(&sim710_eisa_driver);
#endif
/* FIXME: what we'd really like to return here is -ENODEV if
 * no devices have actually been found. Instead, the err
 * above actually only reports problems with kobject_register,
 * so for the moment return success */
return 0;
}
/* Module exit: unregister whichever bus drivers were registered at init */
static void __exit sim710_exit(void)
{
#ifdef CONFIG_MCA
if (MCA_bus)
mca_unregister_driver(&sim710_mca_driver);
#endif
#ifdef CONFIG_EISA
eisa_driver_unregister(&sim710_eisa_driver);
#endif
}
module_init(sim710_init);
module_exit(sim710_exit);
| gpl-2.0 |
YagiAsuka/sample | arch/mips/bcm63xx/timer.c | 12610 | 4510 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
#include <bcm63xx_timer.h>
#include <bcm63xx_regs.h>
static DEFINE_RAW_SPINLOCK(timer_reg_lock);
static DEFINE_RAW_SPINLOCK(timer_data_lock);
static struct clk *periph_clk;
static struct timer_data {
void (*cb)(void *);
void *data;
} timer_data[BCM63XX_TIMER_COUNT];
/*
 * Shared handler for the BCM63xx timer IRQ: read-and-ack all pending
 * timer causes in one shot, then invoke each registered callback for
 * the timers that fired. Callbacks run with timer_data_lock held, so
 * they must not re-register/unregister from within the callback.
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
u32 stat;
int i;
raw_spin_lock(&timer_reg_lock);
stat = bcm_timer_readl(TIMER_IRQSTAT_REG);
/* writing the status back acknowledges the pending causes */
bcm_timer_writel(stat, TIMER_IRQSTAT_REG);
raw_spin_unlock(&timer_reg_lock);
for (i = 0; i < BCM63XX_TIMER_COUNT; i++) {
if (!(stat & TIMER_IRQSTAT_TIMER_CAUSE(i)))
continue;
raw_spin_lock(&timer_data_lock);
if (!timer_data[i].cb) {
raw_spin_unlock(&timer_data_lock);
continue;
}
timer_data[i].cb(timer_data[i].data);
raw_spin_unlock(&timer_data_lock);
}
return IRQ_HANDLED;
}
/*
 * Start timer @id: set its control-register enable bit and unmask its
 * interrupt. Returns -EINVAL for an out-of-range id, 0 on success.
 */
int bcm63xx_timer_enable(int id)
{
u32 reg;
unsigned long flags;
if (id >= BCM63XX_TIMER_COUNT)
return -EINVAL;
raw_spin_lock_irqsave(&timer_reg_lock, flags);
reg = bcm_timer_readl(TIMER_CTLx_REG(id));
reg |= TIMER_CTL_ENABLE_MASK;
bcm_timer_writel(reg, TIMER_CTLx_REG(id));
reg = bcm_timer_readl(TIMER_IRQSTAT_REG);
reg |= TIMER_IRQSTAT_TIMER_IR_EN(id);
bcm_timer_writel(reg, TIMER_IRQSTAT_REG);
raw_spin_unlock_irqrestore(&timer_reg_lock, flags);
return 0;
}
EXPORT_SYMBOL(bcm63xx_timer_enable);
/*
 * Stop timer @id: mirror image of bcm63xx_timer_enable() — clear the
 * enable bit and mask the interrupt. Returns -EINVAL / 0 likewise.
 */
int bcm63xx_timer_disable(int id)
{
u32 reg;
unsigned long flags;
if (id >= BCM63XX_TIMER_COUNT)
return -EINVAL;
raw_spin_lock_irqsave(&timer_reg_lock, flags);
reg = bcm_timer_readl(TIMER_CTLx_REG(id));
reg &= ~TIMER_CTL_ENABLE_MASK;
bcm_timer_writel(reg, TIMER_CTLx_REG(id));
reg = bcm_timer_readl(TIMER_IRQSTAT_REG);
reg &= ~TIMER_IRQSTAT_TIMER_IR_EN(id);
bcm_timer_writel(reg, TIMER_IRQSTAT_REG);
raw_spin_unlock_irqrestore(&timer_reg_lock, flags);
return 0;
}
EXPORT_SYMBOL(bcm63xx_timer_disable);
/*
 * Attach @callback/@data to timer @id. Returns -EINVAL for a bad id or
 * NULL callback, -EBUSY if the slot already has a callback, 0 on success.
 * The slot is claimed under timer_data_lock with interrupts disabled.
 */
int bcm63xx_timer_register(int id, void (*callback)(void *data), void *data)
{
	unsigned long flags;
	int ret = 0;

	if (id >= BCM63XX_TIMER_COUNT || !callback)
		return -EINVAL;

	raw_spin_lock_irqsave(&timer_data_lock, flags);
	if (timer_data[id].cb) {
		ret = -EBUSY;
	} else {
		timer_data[id].cb = callback;
		timer_data[id].data = data;
	}
	raw_spin_unlock_irqrestore(&timer_data_lock, flags);
	return ret;
}
EXPORT_SYMBOL(bcm63xx_timer_register);
/* Detach the callback from timer @id; silently ignores out-of-range ids */
void bcm63xx_timer_unregister(int id)
{
unsigned long flags;
if (id >= BCM63XX_TIMER_COUNT)
return;
raw_spin_lock_irqsave(&timer_data_lock, flags);
timer_data[id].cb = NULL;
raw_spin_unlock_irqrestore(&timer_data_lock, flags);
}
EXPORT_SYMBOL(bcm63xx_timer_unregister);
/* Convert microseconds to timer ticks using the peripheral clock rate */
unsigned int bcm63xx_timer_countdown(unsigned int countdown_us)
{
return (clk_get_rate(periph_clk) / (1000 * 1000)) * countdown_us;
}
EXPORT_SYMBOL(bcm63xx_timer_countdown);
/*
 * Program timer @id with a countdown of @countdown_us microseconds.
 * @monotonic non-zero selects one-shot (monotonic bit cleared), zero
 * selects auto-reload. Returns -EINVAL if the id is out of range or the
 * converted tick count does not fit in the countdown field.
 */
int bcm63xx_timer_set(int id, int monotonic, unsigned int countdown_us)
{
u32 reg, countdown;
unsigned long flags;
if (id >= BCM63XX_TIMER_COUNT)
return -EINVAL;
countdown = bcm63xx_timer_countdown(countdown_us);
if (countdown & ~TIMER_CTL_COUNTDOWN_MASK)
return -EINVAL;
raw_spin_lock_irqsave(&timer_reg_lock, flags);
reg = bcm_timer_readl(TIMER_CTLx_REG(id));
if (monotonic)
reg &= ~TIMER_CTL_MONOTONIC_MASK;
else
reg |= TIMER_CTL_MONOTONIC_MASK;
reg &= ~TIMER_CTL_COUNTDOWN_MASK;
reg |= countdown;
bcm_timer_writel(reg, TIMER_CTLx_REG(id));
raw_spin_unlock_irqrestore(&timer_reg_lock, flags);
return 0;
}
EXPORT_SYMBOL(bcm63xx_timer_set);
/*
 * One-time setup: mask all three timer interrupt enables, grab the
 * peripheral clock (needed by bcm63xx_timer_countdown()) and install
 * the shared interrupt handler.
 *
 * Returns 0 on success, -ENODEV if the clock is missing, or the
 * request_irq() error. Fix: the original leaked the periph_clk
 * reference when request_irq() failed; we now clk_put() it on that
 * error path.
 */
int bcm63xx_timer_init(void)
{
	int ret, irq;
	u32 reg;

	/* mask all three per-timer interrupt enables before hooking the IRQ */
	reg = bcm_timer_readl(TIMER_IRQSTAT_REG);
	reg &= ~TIMER_IRQSTAT_TIMER0_IR_EN;
	reg &= ~TIMER_IRQSTAT_TIMER1_IR_EN;
	reg &= ~TIMER_IRQSTAT_TIMER2_IR_EN;
	bcm_timer_writel(reg, TIMER_IRQSTAT_REG);

	periph_clk = clk_get(NULL, "periph");
	if (IS_ERR(periph_clk))
		return -ENODEV;

	irq = bcm63xx_get_irq_number(IRQ_TIMER);
	ret = request_irq(irq, timer_interrupt, 0, "bcm63xx_timer", NULL);
	if (ret) {
		printk(KERN_ERR "bcm63xx_timer: failed to register irq\n");
		clk_put(periph_clk);	/* don't leak the clock reference */
		return ret;
	}
	return 0;
}
arch_initcall(bcm63xx_timer_init);
| gpl-2.0 |
popazerty/linux-sh4-2.6.32.y | arch/powerpc/boot/mktree.c | 13634 | 3584 | /*
* Makes a tree bootable image for IBM Evaluation boards.
* Basically, just take a zImage, skip the ELF header, and stuff
* a 32 byte header on the front.
*
* We use htonl, which is a network macro, to make sure we're doing
* The Right Thing on an LE machine. It's non-obvious, but it should
* work on anything BSD'ish.
*/
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <netinet/in.h>
#ifdef __sun__
#include <inttypes.h>
#else
#include <stdint.h>
#endif
/* This gets tacked on the front of the image. There are also a few
* bytes allocated after the _start label used by the boot rom (see
* head.S for details).
*/
/* 32-byte header stuffed on the front of the image (all fields big-endian) */
typedef struct boot_block {
uint32_t bb_magic; /* 0x0052504F */
uint32_t bb_dest; /* Target address of the image */
uint32_t bb_num_512blocks; /* Size, rounded-up, in 512 byte blks */
uint32_t bb_debug_flag; /* Run debugger or image after load */
uint32_t bb_entry_point; /* The image address to start */
uint32_t bb_checksum; /* 32 bit checksum including header */
uint32_t reserved[2];
} boot_block_t;
#define IMGBLK 512
/* staging buffer: one 512-byte image block, summed as 32-bit words */
unsigned int tmpbuf[IMGBLK / sizeof(unsigned int)];
/*
 * mktree <zImage-file> <boot-image> <load address> <entry point>
 *
 * Builds the boot header from the command line and the input file size,
 * skips the input's 64K ELF header, copies the payload in 512-byte
 * blocks while accumulating a 32-bit word-sum checksum, then rewrites
 * the header with the final checksum. Exits non-zero on any error.
 */
int main(int argc, char *argv[])
{
int in_fd, out_fd;
int nblks, i;
unsigned int cksum, *cp;
struct stat st;
boot_block_t bt;
if (argc < 5) {
fprintf(stderr, "usage: %s <zImage-file> <boot-image> <load address> <entry point>\n",argv[0]);
exit(1);
}
if (stat(argv[1], &st) < 0) {
perror("stat");
exit(2);
}
/* NOTE(review): this rounds up, but when st_size is an exact multiple
 * of IMGBLK it yields one extra block -- presumably deliberate padding;
 * confirm against the boot ROM before "fixing" it. */
nblks = (st.st_size + IMGBLK) / IMGBLK;
bt.bb_magic = htonl(0x0052504F);
/* If we have the optional entry point parameter, use it */
bt.bb_dest = htonl(strtoul(argv[3], NULL, 0));
bt.bb_entry_point = htonl(strtoul(argv[4], NULL, 0));
/* We know these from the linker command.
 * ...and then move it up into memory a little more so the
 * relocation can happen.
 */
bt.bb_num_512blocks = htonl(nblks);
bt.bb_debug_flag = 0;
bt.bb_checksum = 0;
/* To be neat and tidy :-).
 */
bt.reserved[0] = 0;
bt.reserved[1] = 0;
if ((in_fd = open(argv[1], O_RDONLY)) < 0) {
perror("zImage open");
exit(3);
}
if ((out_fd = open(argv[2], (O_RDWR | O_CREAT | O_TRUNC), 0666)) < 0) {
perror("bootfile open");
exit(3);
}
/* seed the checksum with the header words (bb_checksum still zero) */
cksum = 0;
cp = (void *)&bt;
for (i = 0; i < sizeof(bt) / sizeof(unsigned int); i++)
cksum += *cp++;
/* Assume zImage is an ELF file, and skip the 64K header.
 */
if (read(in_fd, tmpbuf, sizeof(tmpbuf)) != sizeof(tmpbuf)) {
fprintf(stderr, "%s is too small to be an ELF image\n",
argv[1]);
exit(4);
}
if (tmpbuf[0] != htonl(0x7f454c46)) {
fprintf(stderr, "%s is not an ELF image\n", argv[1]);
exit(4);
}
if (lseek(in_fd, (64 * 1024), SEEK_SET) < 0) {
fprintf(stderr, "%s failed to seek in ELF image\n", argv[1]);
exit(4);
}
nblks -= (64 * 1024) / IMGBLK;
/* And away we go......
 */
if (write(out_fd, &bt, sizeof(bt)) != sizeof(bt)) {
perror("boot-image write");
exit(5);
}
while (nblks-- > 0) {
/* NOTE(review): only read errors (<0) are caught; a short read near
 * EOF leaves stale bytes in tmpbuf which are then written as padding. */
if (read(in_fd, tmpbuf, sizeof(tmpbuf)) < 0) {
perror("zImage read");
exit(5);
}
cp = tmpbuf;
for (i = 0; i < sizeof(tmpbuf) / sizeof(unsigned int); i++)
cksum += *cp++;
if (write(out_fd, tmpbuf, sizeof(tmpbuf)) != sizeof(tmpbuf)) {
perror("boot-image write");
exit(5);
}
}
/* rewrite the header with the computed checksum.
 */
bt.bb_checksum = htonl(cksum);
if (lseek(out_fd, 0, SEEK_SET) < 0) {
perror("rewrite seek");
exit(1);
}
if (write(out_fd, &bt, sizeof(bt)) != sizeof(bt)) {
perror("boot-image rewrite");
exit(1);
}
exit(0);
}
| gpl-2.0 |
georgecherian/linux | drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 67 | 6971 | /*
* Copyright (c) 2011 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/dcbnl.h>
#include <linux/math64.h>
#include "mlx4_en.h"
/*
 * Report the cached IEEE ETS configuration for this port.
 *
 * priv->ets is a struct embedded in priv (ieee_setets memcpy()s into
 * it), so its address can never be NULL -- the old "if (!my_ets)
 * return -EINVAL" guard was dead code and has been removed. Always
 * returns 0.
 */
static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
				   struct ieee_ets *ets)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ieee_ets *my_ets = &priv->ets;

	ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
	ets->cbs = my_ets->cbs;
	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
	return 0;
}
/*
 * Sanity-check a user-supplied IEEE ETS configuration before it is
 * programmed into the hardware:
 *  - every priority must map to a valid TC index,
 *  - every TSA must be strict-priority or ETS,
 *  - if any TC uses ETS, the ETS bandwidth shares must sum to 100%.
 * Returns 0 if valid, -EINVAL / -ENOTSUPP otherwise.
 */
static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
{
	int i;
	int total_ets_bw = 0;
	int has_ets_tc = 0;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* Valid TC indices are 0..MLX4_EN_NUM_UP-1, so a mapping
		 * equal to MLX4_EN_NUM_UP is already out of range. The
		 * original ">" comparison was an off-by-one that let that
		 * one invalid index through.
		 */
		if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
			en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
					i, ets->prio_tc[i]);
			return -EINVAL;
		}

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			break;
		case IEEE_8021QAZ_TSA_ETS:
			has_ets_tc = 1;
			total_ets_bw += ets->tc_tx_bw[i];
			break;
		default:
			en_err(priv, "TC[%d]: Not supported TSA: %d\n",
					i, ets->tc_tsa[i]);
			return -ENOTSUPP;
		}
	}

	if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
		en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
				total_ets_bw);
		return -EINVAL;
	}
	return 0;
}
/*
 * Translate the (already validated) ETS configuration plus per-TC rate
 * limits into the firmware's priority-group / bandwidth arrays and push
 * them with SET_PORT_SCHEDULER. Either argument may be NULL, in which
 * case the values cached in priv are used instead.
 */
static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
struct ieee_ets *ets, u16 *ratelimit)
{
struct mlx4_en_dev *mdev = priv->mdev;
int num_strict = 0;
int i;
__u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
__u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };
ets = ets ?: &priv->ets;
ratelimit = ratelimit ?: priv->maxrate;
/* higher TC means higher priority => lower pg */
for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
pg[i] = num_strict++;
tc_tx_bw[i] = MLX4_EN_BW_MAX;
break;
case IEEE_8021QAZ_TSA_ETS:
pg[i] = MLX4_EN_TC_ETS;
/* zero share means "unconfigured": give the minimum instead */
tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
break;
}
}
return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
ratelimit);
}
/*
 * Apply a new IEEE ETS configuration: validate, program the
 * priority-to-TC map and the port scheduler, and only then cache the
 * configuration in priv->ets (so a failed step leaves the cache intact).
 */
static int
mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
err = mlx4_en_ets_validate(priv, ets);
if (err)
return err;
err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
if (err)
return err;
err = mlx4_en_config_port_scheduler(priv, ets, NULL);
if (err)
return err;
memcpy(&priv->ets, ets, sizeof(priv->ets));
return 0;
}
/* Report PFC capability and the currently enabled per-priority pause mask */
static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
pfc->pfc_en = priv->prof->tx_ppp;
return 0;
}
/*
 * Apply a new PFC configuration. Global pause is disabled whenever any
 * per-priority pause bit is set (and re-enabled when none are), then the
 * combined pause settings are pushed to firmware via SET_PORT_general.
 */
static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
pfc->pfc_cap,
pfc->pfc_en,
pfc->mbc,
pfc->delay);
/* rx/tx global pause mirror "any PFC bit set"; ppp carries the bitmap */
priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en;
priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
priv->prof->tx_pause,
priv->prof->tx_ppp,
priv->prof->rx_pause,
priv->prof->rx_ppp);
if (err)
en_err(priv, "Failed setting pause params\n");
return err;
}
/* Only host-managed IEEE DCBX is supported; advertise exactly that */
static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}
/*
 * Accept a DCBX mode change only for host-managed IEEE operation:
 * LLD-managed and CEE modes are rejected, and both the IEEE and HOST
 * capability bits must be present. Returns 0 on acceptance, 1 otherwise
 * (dcbnl convention: non-zero means the mode was refused).
 */
static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	bool supported = !(mode & DCB_CAP_DCBX_LLD_MANAGED) &&
			 !(mode & DCB_CAP_DCBX_VER_CEE) &&
			 (mode & DCB_CAP_DCBX_VER_IEEE) &&
			 (mode & DCB_CAP_DCBX_HOST);

	return supported ? 0 : 1;
}
#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
/* Report cached per-TC rate limits, converting HW units back to Kbps */
static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
struct ieee_maxrate *maxrate)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
maxrate->tc_maxrate[i] =
priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
return 0;
}
/*
 * Apply new per-TC rate limits: convert Kbps to HW units (rounding up,
 * 0 = unlimited), program the port scheduler, and cache the values only
 * after the hardware accepted them.
 */
static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
struct ieee_maxrate *maxrate)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 tmp[IEEE_8021QAZ_MAX_TCS];
int i, err;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
/* Convert from Kbps into HW units, rounding result up.
 * Setting to 0, means unlimited BW.
 */
tmp[i] = div_u64(maxrate->tc_maxrate[i] +
MLX4_RATELIMIT_UNITS_IN_KB - 1,
MLX4_RATELIMIT_UNITS_IN_KB);
}
err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
if (err)
return err;
memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));
return 0;
}
/* Full dcbnl operations: ETS + maxrate + PFC (ETS-capable devices) */
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
.ieee_getets = mlx4_en_dcbnl_ieee_getets,
.ieee_setets = mlx4_en_dcbnl_ieee_setets,
.ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
.ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
.ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
.ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
.getdcbx = mlx4_en_dcbnl_getdcbx,
.setdcbx = mlx4_en_dcbnl_setdcbx,
};
/* Reduced operations: PFC only, for devices without ETS support */
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
.ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
.ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
.getdcbx = mlx4_en_dcbnl_getdcbx,
.setdcbx = mlx4_en_dcbnl_setdcbx,
};
| gpl-2.0 |
alpinelinux/linux-stable-grsec | drivers/crypto/caam/caamhash.c | 67 | 57720 | /*
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
* Copyright 2011 Freescale Semiconductor, Inc.
*
* Based on caamalg.c crypto API driver.
*
* relationship of digest job descriptor or first job descriptor after init to
* shared descriptors:
*
* --------------- ---------------
* | JobDesc #1 |-------------------->| ShareDesc |
* | *(packet 1) | | (hashKey) |
* --------------- | (operation) |
* ---------------
*
* relationship of subsequent job descriptors to shared descriptors:
*
* --------------- ---------------
* | JobDesc #2 |-------------------->| ShareDesc |
* | *(packet 2) | |------------->| (hashKey) |
* --------------- | |-------->| (operation) |
* . | | | (load ctx2) |
* . | | ---------------
* --------------- | |
* | JobDesc #3 |------| |
* | *(packet 3) | |
* --------------- |
* . |
* . |
* --------------- |
* | JobDesc #4 |------------
* | *(packet 4) |
* ---------------
*
* The SharedDesc never changes for a connection unless rekeyed, but
* each packet will likely be in a different place. So all we need
* to know to process the packet is where the input is, where the
* output goes, and what context we want to process with. Context is
* in the SharedDesc, packet references in the JobDesc.
*
* So, a job desc looks like:
*
* ---------------------
* | Header |
* | ShareDesc Pointer |
* | SEQ_OUT_PTR |
* | (output buffer) |
* | (output length) |
* | SEQ_IN_PTR |
* | (input buffer) |
* | (input length) |
* ---------------------
*/
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#define CAAM_CRA_PRIORITY 3000
/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
/* length of descriptors text */
#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN 8
#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head hash_list;
/* ahash per-session (per-tfm) context */
struct caam_hash_ctx {
	struct device *jrdev;	/* CAAM job ring device servicing this tfm */
	/* pre-built shared descriptors, one per request type */
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	/* DMA handles of the shared descriptors above */
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;	/* OP_ALG_* algorithm selector used in descriptors */
	u32 alg_op;	/* algorithm/op selector used for split key generation */
	u8 key[CAAM_MAX_HASH_KEY_SIZE];	/* MDHA split key material (HMAC) */
	dma_addr_t key_dma;		/* DMA handle of key[] */
	int ctx_len;	/* size of the running-hash context for this alg */
	unsigned int split_key_len;	/* actual split key length */
	unsigned int split_key_pad_len;	/* split key length ALIGNed to 16 */
};
/* ahash state: per-request software state carried across update/final */
struct caam_hash_state {
	dma_addr_t buf_dma;	/* DMA mapping of the active bounce buffer */
	dma_addr_t ctx_dma;	/* DMA mapping of caam_ctx[] */
	/*
	 * Two bounce buffers used alternately (selected by current_buf) to
	 * hold the tail of input that is not yet a full block multiple.
	 */
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];	/* h/w running digest + msg length */
	/* implementation selected for the current state of this request */
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;	/* which of buf_0/buf_1 is currently active */
};
/* Common job descriptor seq in/out ptr routines */
/*
 * Map state->caam_ctx, and append seq_out_ptr command that points to it.
 * The context is written back by the device, hence DMA_FROM_DEVICE.
 * Returns 0 or -ENOMEM if the mapping fails.
 */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}
/*
 * DMA-map @result (@digestsize bytes, written by the device) and append a
 * SEQ OUT PTR command for it to @desc.  Returns the mapping handle; the
 * caller is responsible for checking it with dma_mapping_error().
 */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t out_dma = dma_map_single(jrdev, result, digestsize,
					    DMA_FROM_DEVICE);

	append_seq_out_ptr(desc, out_dma, digestsize, 0);
	return out_dma;
}
/*
 * DMA-map @buf (@buflen bytes, read by the device) and record it as a
 * single entry of the h/w link table at @sec4_sg.  Returns the mapping
 * handle so the caller can track/unmap it later.
 */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t mapping = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);

	dma_to_sec4_sg_one(sec4_sg, mapping, buflen, 0);
	return mapping;
}
/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	/* convert the s/w scatterlist to h/w link table entries, flagging
	 * the final entry */
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}
/*
 * Only put buffer in link table if it contains data, which is possible,
 * since a buffer has previously been used, and needs to be unmapped.
 * Returns the new mapping handle, or 0 if there was nothing to map.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	/* release the previous mapping of this buffer, if any */
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;	/* no buffered bytes to hash this time */

	return buf_dma;
}
/*
 * Map state->caam_ctx, and add it to link table.
 * @flag gives the DMA direction; note @desc is currently unused here.
 * Returns 0 or -ENOMEM if the mapping fails.
 */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
/* Common shared descriptor commands */

/* Append the (encrypted) MDHA split key as immediate class-2 key data */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}
/* Start a shared descriptor, appending the key only if one has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip key loading if the key is already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read into VSIL */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes as the last class-2 message data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes (digestsize of them) to seqout */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}
/*
 * For ahash update, final and finup: import context, read and write to seqout.
 * Builds the body of a shared descriptor for operations that resume from a
 * previously saved running context.
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
/*
 * For ahash firsts and digest: read and write to seqout.
 * Same as ahash_ctx_data_to_out() but without importing a prior context.
 */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
/*
 * (Re)build and DMA-map the five shared descriptors (update, update_first,
 * final, finup, digest) from the tfm's current algorithm and key state.
 * Returns 0 on success or -ENOMEM if a descriptor cannot be DMA-mapped.
 */
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	/* a split key present means HMAC with a precomputed split key */
	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
/* Generate the MDHA split key from key_in into ctx->key (see gen_split_key) */
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}
/*
 * Digest the key if it is too large: run an unkeyed hash of @key_in
 * (*keylen bytes) through the CAAM and write the digestsize-byte result
 * to @key_out, updating *keylen.  Used when an HMAC key exceeds the
 * algorithm block size.  Returns 0 or a negative errno.
 */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/*
		 * Wait non-interruptibly: an interruptible wait could return
		 * on a signal while the CAAM job is still in flight, after
		 * which we would read result.err before split_key_done() has
		 * written it and unmap/free buffers the device is still
		 * using (use-after-free).
		 */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
/*
 * Set the HMAC key for this tfm.  Keys longer than the block size are
 * first digested down to digestsize, then the MDHA split key is generated
 * into ctx->key, DMA-mapped, and all shared descriptors are rebuilt.
 * Returns 0, -ENOMEM on allocation/mapping failure, or -EINVAL (with
 * CRYPTO_TFM_RES_BAD_KEY_LEN set) if key processing fails.
 */
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		/* hash an over-long key down to digest size first */
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		/* descriptors unusable: drop the key mapping again */
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * ahash_edesc - s/w-extended ahash descriptor, allocated per request
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source scatterlist is chained
 * @src_nents: number of segments in input scatterlist (0 if unused)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space (0 if unused)
 * @sec4_sg: pointer to h/w link table (stored inline after hw_desc)
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
/* Undo the DMA mappings recorded in @edesc (source, result, link table) */
static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
/* Like ahash_unmap(), but also unmaps state->caam_ctx with direction @flag */
static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}
/*
 * Job ring completion callback for digest-style jobs: unmap source and
 * result, free the extended descriptor and complete the request.
 */
static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* recover the enclosing edesc from the h/w descriptor pointer */
	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/*
 * Completion callback for update jobs where the running context was mapped
 * bidirectionally (read in, written back by the device).
 */
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			    void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* recover the enclosing edesc from the h/w descriptor pointer */
	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/*
 * Completion callback for final/finup jobs: the context was a device input
 * (DMA_TO_DEVICE) and the digest was written to req->result.
 */
static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* recover the enclosing edesc from the h/w descriptor pointer */
	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/*
 * Completion callback for first-update jobs: the device wrote the running
 * context back to state->caam_ctx (DMA_FROM_DEVICE).
 */
static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	/* recover the enclosing edesc from the h/w descriptor pointer */
	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
/*
 * Submit an update job descriptor: hash the imported running context plus
 * any buffered bytes and the block-aligned part of req->src; the remainder
 * is stashed in the alternate bounce buffer for the next call.
 * Returns -EINPROGRESS on successful enqueue, 0 when everything was
 * buffered, or a negative errno.
 */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	/* only hash a block multiple; the tail is buffered for later */
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		/*
		 * Point desc at the h/w job descriptor before it is used:
		 * it was previously passed to ctx_map_to_sec4_sg() while
		 * still uninitialized.
		 */
		desc = edesc->hw_desc;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret) {
			kfree(edesc);	/* was leaked on this error path */
			return ret;
		}

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen) {
				/* stash the unhashed tail for the next call */
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
				state->current_buf = !state->current_buf;
			}
		} else {
			/* no src data: mark the previous entry as the last */
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		sh_len = desc_len(sh_desc);
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			kfree(edesc);	/* was leaked on this error path */
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				       to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					   DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		/* not enough for a full block: just append to the buffer */
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}
/*
 * Submit an ahash final job: import the running context plus any buffered
 * bytes and write the digest to req->result.
 * Returns -EINPROGRESS on successful enqueue or a negative errno.
 */
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret) {
		kfree(edesc);	/* was leaked on this error path */
		return ret;
	}

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/*
	 * Flag the last link-table *entry*.  This used to be indexed with
	 * sec4_sg_bytes (a byte count), writing SEC4_SG_LEN_FIN far past
	 * the end of the allocation and corrupting memory.
	 */
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		kfree(edesc);	/* was leaked on this error path */
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		kfree(edesc);	/* was leaked on this error path */
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
/*
 * Submit an ahash finup job: import the running context, hash any buffered
 * bytes plus all of req->src, and write the digest to req->result.
 * Returns -EINPROGRESS on successful enqueue or a negative errno.
 */
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	/* link table: ctx entry [+ buffer entry] + src entries */
	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			       buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}
/*
 * Submit a one-shot digest job: hash all of req->src in a single operation
 * and write the digest to req->result.
 * Returns -EINPROGRESS on successful enqueue or a negative errno.
 */
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	/* src_nents == 0 means a single segment, addressed directly below */
	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			  DESC_JOB_IO_LEN;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		/* multiple segments: feed the device a link table */
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		/* single segment: point at it directly */
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/*
 * Submit ahash final when it is the first job descriptor: no imported
 * context, only the buffered bytes are hashed (one-shot digest descriptor)
 * and the result is written to req->result.
 * Returns -EINPROGRESS on successful enqueue or a negative errno.
 */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	/* the bounce buffer is the sole device input */
	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit ahash update if it the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
int *next_buflen = state->current_buf ? &state->buflen_0 :
&state->buflen_1;
int in_len = *buflen + req->nbytes, to_hash;
int sec4_sg_bytes, src_nents;
struct ahash_edesc *edesc;
u32 *desc, *sh_desc = ctx->sh_desc_update_first;
dma_addr_t ptr = ctx->sh_desc_update_first_dma;
bool chained = false;
int ret = 0;
int sh_len;
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
to_hash = in_len - *next_buflen;
if (to_hash) {
src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
&chained);
sec4_sg_bytes = (1 + src_nents) *
sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
"could not allocate extended descriptor\n");
return -ENOMEM;
}
edesc->src_nents = src_nents;
edesc->chained = chained;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
edesc->dst_dma = 0;
state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
buf, *buflen);
src_map_to_sec4_sg(jrdev, req->src, src_nents,
edesc->sec4_sg + 1, chained);
if (*next_buflen) {
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
*next_buflen, 0);
state->current_buf = !state->current_buf;
}
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
HDR_REVERSE);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
return -ENOMEM;
}
append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
if (ret)
return ret;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
if (!ret) {
ret = -EINPROGRESS;
state->update = ahash_update_ctx;
state->finup = ahash_finup_ctx;
state->final = ahash_final_ctx;
} else {
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
DMA_TO_DEVICE);
kfree(edesc);
}
} else if (*next_buflen) {
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
*buflen = *next_buflen;
*next_buflen = 0;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
*next_buflen, 1);
#endif
return ret;
}
/*
 * submit ahash finup if it is the first job descriptor after update
 *
 * Hashes buffered bytes plus all of req->src in one digest job and
 * writes the result to req->result.
 *
 * Fix: the original leaked the kmalloc'd edesc (and, on the second
 * failure, the S/G table DMA mapping) when a DMA mapping error
 * occurred.
 */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	/* one entry for the buffer, plus the source entries */
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);
	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		/*
		 * fix: free the edesc instead of leaking it.
		 * NOTE(review): the buf/src DMA mappings made above are
		 * still left mapped on error, as in the original code.
		 */
		kfree(edesc);
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		/* fix: unmap the S/G table and free the edesc */
		dma_unmap_single(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
int *next_buflen = state->current_buf ?
&state->buflen_1 : &state->buflen_0;
int to_hash;
u32 *sh_desc = ctx->sh_desc_update_first, *desc;
dma_addr_t ptr = ctx->sh_desc_update_first_dma;
int sec4_sg_bytes, src_nents;
dma_addr_t src_dma;
u32 options;
struct ahash_edesc *edesc;
bool chained = false;
int ret = 0;
int sh_len;
*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1);
to_hash = req->nbytes - *next_buflen;
if (to_hash) {
src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
&chained);
dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
DMA_TO_DEVICE, chained);
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
"could not allocate extended descriptor\n");
return -ENOMEM;
}
edesc->src_nents = src_nents;
edesc->chained = chained;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
edesc->dst_dma = 0;
if (src_nents) {
sg_to_sec4_sg_last(req->src, src_nents,
edesc->sec4_sg, 0);
edesc->sec4_sg_dma = dma_map_single(jrdev,
edesc->sec4_sg,
sec4_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
return -ENOMEM;
}
src_dma = edesc->sec4_sg_dma;
options = LDST_SGF;
} else {
src_dma = sg_dma_address(req->src);
options = 0;
}
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src, to_hash,
*next_buflen, 0);
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
HDR_REVERSE);
append_seq_in_ptr(desc, src_dma, to_hash, options);
ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
if (ret)
return ret;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
req);
if (!ret) {
ret = -EINPROGRESS;
state->update = ahash_update_ctx;
state->finup = ahash_finup_ctx;
state->final = ahash_final_ctx;
} else {
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
DMA_TO_DEVICE);
kfree(edesc);
}
} else if (*next_buflen) {
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
state->final = ahash_final_no_ctx;
scatterwalk_map_and_copy(next_buf, req->src, 0,
req->nbytes, 0);
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
*next_buflen, 1);
#endif
return ret;
}
/*
 * finup with no prior update: the whole message is in req->src, so a
 * plain one-shot digest produces the same result.
 */
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
/*
 * Reset per-request state: install the "first job" handlers and clear
 * the software buffering bookkeeping.
 */
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	/* No hardware context exists yet, so start with the *_first path. */
	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = state->buflen_1 = 0;

	return 0;
}
/* Dispatch "update" to whichever handler the current state installed. */
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	return state->update(req);
}
/* Dispatch "finup" to whichever handler the current state installed. */
static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	return state->finup(req);
}
/* Dispatch "final" to whichever handler the current state installed. */
static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	return state->final(req);
}
/*
 * Export the hash state so the operation can be resumed via
 * ahash_import(): raw caam_hash_ctx first, caam_hash_state after it.
 *
 * NOTE(review): this copies the whole ctx, including the jrdev pointer
 * and shared-descriptor DMA addresses; importing into a different
 * device setup would be unsafe - confirm callers only round-trip
 * within the same tfm configuration.
 */
static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));
	return 0;
}
/*
 * Restore hash state previously saved by ahash_export(); the layout
 * must match exactly (ctx followed by state).
 */
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));
	return 0;
}
/* Static description of one hash algorithm (plain + HMAC variant). */
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];			/* plain alg name */
	char driver_name[CRYPTO_MAX_ALG_NAME];		/* plain driver name */
	char hmac_name[CRYPTO_MAX_ALG_NAME];		/* keyed alg name */
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];	/* keyed driver name */
	unsigned int blocksize;				/* hash block size */
	struct ahash_alg template_ahash;		/* ops + digest size */
	u32 alg_type;		/* CAAM algorithm selector */
	u32 alg_op;		/* CAAM operation (HMAC) selector */
};
/*
 * ahash descriptors
 *
 * One template per supported algorithm (SHA-1/224/256/384/512, MD5).
 * Each entry is registered twice at init time: once keyed (HMAC) and
 * once unkeyed; caam_hash_alloc() picks the appropriate name pair.
 */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
/* Runtime registration record: one per registered (keyed/unkeyed) alg. */
struct caam_hash_alg {
	struct list_head entry;		/* linked into hash_list */
	int alg_type;			/* CAAM algorithm selector */
	int alg_op;			/* CAAM operation selector */
	struct ahash_alg ahash_alg;	/* alg actually registered */
};
/*
 * Per-tfm init: recover the caam_hash_alg that owns this crypto_alg,
 * grab a job ring, record descriptor header values and the running
 * context length, then build the shared descriptors.
 */
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * NOTE(review): the 224 and 384 slots are 32 and 64 bytes -
	 * presumably the full SHA-256/SHA-512 internal state sizes; the
	 * table is indexed by the ALGSEL subfield below.
	 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int ret = 0;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	/* running-context length is selected by the algorithm selector */
	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}
/*
 * Per-tfm teardown: unmap each shared descriptor that was successfully
 * DMA-mapped in ahash_set_sh_desc(), then release the job ring. Each
 * unmap is guarded so partially-initialized contexts tear down safely.
 */
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
/*
 * Module teardown: unregister and free every algorithm on hash_list.
 * The hash_list.next check guards against init having bailed out
 * before INIT_LIST_HEAD() ran.
 */
static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
/*
 * Build a registration record from a template. When @keyed is true the
 * HMAC name pair is used, otherwise the plain one. Returns the new
 * record or an ERR_PTR on allocation failure; the caller owns it.
 */
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;
	const char *name, *driver_name;

	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Start from the template's ahash ops, then fill in the names. */
	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	name = keyed ? template->hmac_name : template->name;
	driver_name = keyed ? template->hmac_driver_name :
			      template->driver_name;
	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 driver_name);

	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
/*
 * Module init: locate the SEC controller node, verify the core caam
 * driver initialized, then register a keyed (HMAC) and an unkeyed
 * variant of every template in driver_hash[]. Registration failures
 * are logged and skipped rather than aborting the whole module.
 *
 * NOTE(review): err retains the result of the *last* registration
 * attempt, so a failure on the final algorithm makes the module init
 * fail even if earlier algorithms registered fine - confirm this is
 * intentional.
 */
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	void *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		/* older device trees use a different compatible string */
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(&driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(&driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}
/* Standard module registration and metadata. */
module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");
| gpl-2.0 |
turulli/test | xbmc/filesystem/posix/PosixFile.cpp | 67 | 8639 | /*
* Copyright (C) 2014 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#if defined(TARGET_POSIX)
#include "PosixFile.h"
#include "utils/AliasShortcutUtils.h"
#include "URL.h"
#include "utils/log.h"
#include "filesystem/File.h"
#ifdef HAVE_CONFIG_H
#include "config.h" // for HAVE_POSIX_FADVISE
#endif // HAVE_CONFIG_H
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <string>
#include <assert.h>
#include <limits.h>
#include <algorithm>
#include <sys/ioctl.h>
#include <errno.h>
using namespace XFILE;
// Construct a closed file object: invalid fd, unknown position, writes denied.
CPosixFile::CPosixFile() :
  m_fd(-1), m_filePos(-1), m_lastDropPos(-1), m_allowWrite(false)
{ }
// Close the underlying descriptor if one is still open.
CPosixFile::~CPosixFile()
{
  if (m_fd >= 0)
    close(m_fd);
}
// local helper: extract the path from the URL, resolving alias
// shortcuts via the shortcut utilities.
static std::string getFilename(const CURL& url)
{
  std::string path = url.GetFileName();
  if (IsAliasShortcut(path))
    TranslateAliasShortcut(path);

  return path;
}
// Open the file read-only.
// Returns false if already open, the URL has no usable path, or open(2)
// fails. Fix: the original set m_filePos = 0 even when open() failed,
// leaving stale state; it also passed a mode argument that open(2)
// ignores without O_CREAT.
bool CPosixFile::Open(const CURL& url)
{
  if (m_fd >= 0)
    return false; // already open

  const std::string filename(getFilename(url));
  if (filename.empty())
    return false;

  m_fd = open(filename.c_str(), O_RDONLY);
  if (m_fd < 0)
    return false;

  m_filePos = 0; // only track position once the open succeeded
  return true;
}
// Open (creating if necessary) the file for read/write access.
// bOverWrite truncates any existing content. Returns false if already
// open, the URL has no usable path, or open(2) fails.
bool CPosixFile::OpenForWrite(const CURL& url, bool bOverWrite /* = false*/ )
{
  if (m_fd >= 0)
    return false;

  const std::string filename(getFilename(url));
  if (filename.empty())
    return false;

  // mode 0664-ish: rw for user/group, read for others
  m_fd = open(filename.c_str(), O_RDWR | O_CREAT | (bOverWrite ? O_TRUNC : 0), S_IWUSR | S_IRUSR | S_IRGRP | S_IWGRP | S_IROTH);
  if (m_fd < 0)
    return false;

  m_filePos = 0;
  m_allowWrite = true;
  return true;
}
// Close the descriptor (if open) and reset all bookkeeping to the
// freshly-constructed state.
void CPosixFile::Close()
{
  if (m_fd < 0)
    return; // nothing open

  close(m_fd);
  m_fd = -1;
  m_filePos = -1;
  m_lastDropPos = -1;
  m_allowWrite = false;
}
// Read up to uiBufSize bytes into lpBuf. Returns the byte count read,
// or -1 on error (in which case the cached file position is refreshed
// via a no-op Seek). May return fewer bytes than requested.
ssize_t CPosixFile::Read(void* lpBuf, size_t uiBufSize)
{
  if (m_fd < 0)
    return -1;

  assert(lpBuf != NULL || uiBufSize == 0);
  if (lpBuf == NULL && uiBufSize != 0)
    return -1;

  // read(2) can't report more than SSIZE_MAX anyway
  if (uiBufSize > SSIZE_MAX)
    uiBufSize = SSIZE_MAX;

  const ssize_t res = read(m_fd, lpBuf, uiBufSize);
  if (res < 0)
  {
    Seek(0, SEEK_CUR); // force update file position
    return -1;
  }

  if (m_filePos >= 0)
  {
    m_filePos += res; // if m_filePos was known - update it
#if defined(HAVE_POSIX_FADVISE)
    // Drop the cache between then last drop and 16 MB behind where we
    // are now, to make sure the file doesn't displace everything else.
    // However, never throw out the first 16 MB of the file, as it might
    // be the header etc., and never ask the OS to drop in chunks of
    // less than 1 MB.
    const int64_t end_drop = m_filePos - 16 * 1024 * 1024;
    if (end_drop >= 17 * 1024 * 1024)
    {
      const int64_t start_drop = std::max<int64_t>(m_lastDropPos, 16 * 1024 * 1024);
      if (end_drop - start_drop >= 1 * 1024 * 1024 &&
          posix_fadvise(m_fd, start_drop, end_drop - start_drop, POSIX_FADV_DONTNEED) == 0)
        m_lastDropPos = end_drop;  // only advance on successful advise
    }
#endif
  }

  return res;
}
// Write up to uiBufSize bytes from lpBuf. Returns the byte count
// written, or -1 on error or if the file was not opened for writing.
ssize_t CPosixFile::Write(const void* lpBuf, size_t uiBufSize)
{
  if (m_fd < 0)
    return -1;

  assert(lpBuf != NULL || uiBufSize == 0);
  if ((lpBuf == NULL && uiBufSize != 0) || !m_allowWrite)
    return -1;

  // write(2) can't report more than SSIZE_MAX
  if (uiBufSize > SSIZE_MAX)
    uiBufSize = SSIZE_MAX;

  const ssize_t res = write(m_fd, lpBuf, uiBufSize);
  if (res < 0)
  {
    Seek(0, SEEK_CUR); // force update file position
    return -1;
  }

  if (m_filePos >= 0)
    m_filePos += res; // if m_filePos was known - update it

  return res;
}
// Seek to iFilePosition relative to iWhence (SEEK_SET/CUR/END).
// Returns the new absolute position, or -1 on failure; also refreshes
// the cached m_filePos.
int64_t CPosixFile::Seek(int64_t iFilePosition, int iWhence /* = SEEK_SET*/)
{
  if (m_fd < 0)
    return -1;

#ifdef TARGET_ANDROID
  // TODO: properly support with detection in configure
  // Android special case: Android doesn't substitute off64_t for off_t and similar functions
  m_filePos = lseek64(m_fd, (off64_t)iFilePosition, iWhence);
#else  // !TARGET_ANDROID
  const off_t filePosOffT = (off_t) iFilePosition;
  // check for parameter overflow (32-bit off_t can't hold every int64_t)
  if (sizeof(int64_t) != sizeof(off_t) && iFilePosition != filePosOffT)
    return -1;

  m_filePos = lseek(m_fd, filePosOffT, iWhence);
#endif // !TARGET_ANDROID

  return m_filePos;
}
// Truncate (or extend) the file to exactly `size` bytes.
// Returns ftruncate(2)'s result, or -1 if no file is open or the size
// doesn't fit in off_t on this platform.
int CPosixFile::Truncate(int64_t size)
{
  if (m_fd < 0)
    return -1;

  const off_t newSize = (off_t) size;
  // Reject values that don't survive the int64_t -> off_t narrowing.
  if (sizeof(off_t) != sizeof(int64_t) && size != newSize)
    return -1;

  return ftruncate(m_fd, newSize);
}
// Return the current file position, querying the kernel only when the
// cached value is unknown. Returns -1 if no file is open.
int64_t CPosixFile::GetPosition()
{
  if (m_fd < 0)
    return -1;

  if (m_filePos < 0)
    m_filePos = lseek(m_fd, 0, SEEK_CUR);

  return m_filePos;
}
// Return the file's size in bytes via fstat64, or -1 on failure.
int64_t CPosixFile::GetLength()
{
  if (m_fd < 0)
    return -1;

  struct stat64 st;
  if (fstat64(m_fd, &st) != 0)
    return -1;

  return st.st_size;
}
// Flush buffered writes to stable storage. The interface is void, so
// fsync(2) failures cannot be reported to the caller.
void CPosixFile::Flush()
{
  if (m_fd >= 0)
    fsync(m_fd);
}
// Handle control requests:
//  - IOCTRL_NATIVE: pass an ioctl straight to the descriptor.
//  - IOCTRL_SEEK_POSSIBLE: empirically probe whether the fd is
//    seekable by seeking one byte away and back (pipes/sockets fail).
//    Returns 1 if seekable, 0 if not, -1 if it can't be determined.
int CPosixFile::IoControl(EIoControl request, void* param)
{
  if (m_fd < 0)
    return -1;

  if (request == IOCTRL_NATIVE)
  {
    if(!param)
      return -1;
    return ioctl(m_fd, ((SNativeIoControl*)param)->request, ((SNativeIoControl*)param)->param);
  }
  else if (request == IOCTRL_SEEK_POSSIBLE)
  {
    if (GetPosition() < 0)
      return -1; // current position is unknown, can't test seeking
    else if (m_filePos > 0)
    {
      const int64_t orgPos = m_filePos;
      // try to seek one byte back
      const bool seekPossible = (Seek(orgPos - 1, SEEK_SET) == (orgPos - 1));
      // restore file position
      if (Seek(orgPos, SEEK_SET) != orgPos)
        return 0; // seeking is not possible
      return seekPossible ? 1 : 0;
    }
    else
    { // m_filePos == 0
      // try to seek one byte forward
      const bool seekPossible = (Seek(1, SEEK_SET) == 1);
      // restore file position
      if (Seek(0, SEEK_SET) != 0)
        return 0; // seeking is not possible

      if (seekPossible)
        return 1;

      // forward seek failed: may be an empty regular file (seek past
      // EOF of a zero-length file can fail) - can't tell the cases apart
      if (GetLength() <= 0)
        return -1; // size of file is zero or can be zero, can't test seeking
      else
        return 0; // size of file is 1 byte or more and seeking not possible
    }
  }

  return -1;
}
bool CPosixFile::Delete(const CURL& url)
{
const std::string filename(getFilename(url));
if (filename.empty())
return false;
if (unlink(filename.c_str()) == 0)
return true;
if (errno == EACCES || errno == EPERM)
CLog::LogF(LOGWARNING, "Can't access file \"%s\"", filename.c_str());
return false;
}
// Rename `url` to `urlnew`. Same-name is a trivial success. If the
// source and target live on different filesystems (EXDEV) the move is
// emulated with copy + delete, rolling back the copy if the source
// can't be deleted afterwards.
bool CPosixFile::Rename(const CURL& url, const CURL& urlnew)
{
  const std::string name(getFilename(url)), newName(getFilename(urlnew));
  if (name.empty() || newName.empty())
    return false;

  if (name == newName)
    return true;

  if (rename(name.c_str(), newName.c_str()) == 0)
    return true;

  if (errno == EACCES || errno == EPERM)
    CLog::LogF(LOGWARNING, "Can't access file \"%s\" for rename to \"%s\"", name.c_str(), newName.c_str());

  // rename across mount points - need to copy/delete
  if (errno == EXDEV)
  {
    CLog::LogF(LOGDEBUG, "Source file \"%s\" and target file \"%s\" are located on different filesystems, copy&delete will be used instead of rename", name.c_str(), newName.c_str());
    if (XFILE::CFile::Copy(name, newName))
    {
      if (XFILE::CFile::Delete(name))
        return true;
      else
        XFILE::CFile::Delete(newName); // undo the copy
    }
  }

  return false;
}
bool CPosixFile::Exists(const CURL& url)
{
const std::string filename(getFilename(url));
if (filename.empty())
return false;
struct stat64 st;
return stat64(filename.c_str(), &st) == 0 && !S_ISDIR(st.st_mode);
}
// stat64 the file named by the URL into *buffer. Returns stat64's
// result, or -1 for a bad path or null buffer.
int CPosixFile::Stat(const CURL& url, struct __stat64* buffer)
{
  assert(buffer != NULL);
  const std::string filename(getFilename(url));
  if (filename.empty() || !buffer)
    return -1;

  return stat64(filename.c_str(), buffer);
}
// fstat64 the currently-open descriptor into *buffer. Returns -1 when
// no file is open or buffer is null.
int CPosixFile::Stat(struct __stat64* buffer)
{
  assert(buffer != NULL);
  if (m_fd < 0 || !buffer)
    return -1;

  return fstat64(m_fd, buffer);
}
#endif // TARGET_POSIX
| gpl-2.0 |
ultrasystem/system | fs/ntfs/inode.c | 323 | 99001 | /**
* inode.c - NTFS kernel inode handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2007 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/aio.h>
#include "aops.h"
#include "attrib.h"
#include "bitmap.h"
#include "dir.h"
#include "debug.h"
#include "inode.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "time.h"
#include "ntfs.h"
/**
 * ntfs_test_inode - compare two (possibly fake) inodes for equality
 * @vi:		vfs inode which to test
 * @na:		ntfs attribute which is being tested with
 *
 * Decide whether the vfs inode @vi represents the ntfs attribute @na.
 * For normal file/directory lookups @na->type is AT_UNUSED and the
 * name fields are ignored; for fake (attribute) inodes the type, name
 * length and name must all agree.
 *
 * Return 1 if the attributes match and 0 if not.
 *
 * NOTE: This function runs with the inode_hash_lock spin lock held so it is
 * not allowed to sleep.
 */
int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
{
	ntfs_inode *ni;

	if (vi->i_ino != na->mft_no)
		return 0;
	ni = NTFS_I(vi);
	if (likely(!NInoAttr(ni))) {
		/* Normal inode: it matches only a normal-inode query. */
		return na->type == AT_UNUSED;
	}
	/* Fake attribute inode: type, name length and name must agree. */
	if (ni->type != na->type || ni->name_len != na->name_len)
		return 0;
	if (na->name_len && memcmp(ni->name, na->name,
			na->name_len * sizeof(ntfschar)))
		return 0;
	/* Match! */
	return 1;
}
/**
 * ntfs_init_locked_inode - initialize an inode
 * @vi:		vfs inode to initialize
 * @na:		ntfs attribute which to initialize @vi to
 *
 * Initialize the vfs inode @vi with the values from the ntfs attribute @na in
 * order to enable ntfs_test_inode() to do its work.
 *
 * If initializing the normal file/directory inode, set @na->type to AT_UNUSED.
 * In that case, @na->name and @na->name_len should be set to NULL and 0,
 * respectively. Although that is not strictly necessary as
 * ntfs_read_locked_inode() will fill them in later.
 *
 * Return 0 on success and -errno on error.
 *
 * NOTE: This function runs with the inode->i_lock spin lock held so it is not
 * allowed to sleep. (Hence the GFP_ATOMIC allocation.)
 */
static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
{
	ntfs_inode *ni = NTFS_I(vi);

	vi->i_ino = na->mft_no;

	ni->type = na->type;
	/* Index allocations are MST (multi-sector transfer) protected. */
	if (na->type == AT_INDEX_ALLOCATION)
		NInoSetMstProtected(ni);

	ni->name = na->name;
	ni->name_len = na->name_len;

	/* If initializing a normal inode, we are done. */
	if (likely(na->type == AT_UNUSED)) {
		BUG_ON(na->name);
		BUG_ON(na->name_len);
		return 0;
	}

	/* It is a fake inode. */
	NInoSetAttr(ni);

	/*
	 * We have I30 global constant as an optimization as it is the name
	 * in >99.9% of named attributes! The other <0.1% incur a GFP_ATOMIC
	 * allocation but that is ok. And most attributes are unnamed anyway,
	 * thus the fraction of named attributes with name != I30 is actually
	 * absolutely tiny.
	 */
	if (na->name_len && na->name != I30) {
		unsigned int i;

		BUG_ON(!na->name);
		/* Copy the name and append a terminating NUL character. */
		i = na->name_len * sizeof(ntfschar);
		ni->name = kmalloc(i + sizeof(ntfschar), GFP_ATOMIC);
		if (!ni->name)
			return -ENOMEM;
		memcpy(ni->name, na->name, i);
		ni->name[na->name_len] = 0;
	}
	return 0;
}
typedef int (*set_t)(struct inode *, void *);
static int ntfs_read_locked_inode(struct inode *vi);
static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi);
static int ntfs_read_locked_index_inode(struct inode *base_vi,
struct inode *vi);
/**
* ntfs_iget - obtain a struct inode corresponding to a specific normal inode
* @sb: super block of mounted volume
* @mft_no: mft record number / inode number to obtain
*
* Obtain the struct inode corresponding to a specific normal inode (i.e. a
* file or directory).
*
* If the inode is in the cache, it is just returned with an increased
* reference count. Otherwise, a new struct inode is allocated and initialized,
* and finally ntfs_read_locked_inode() is called to read in the inode and
* fill in the remainder of the inode structure.
*
* Return the struct inode on success. Check the return value with IS_ERR() and
* if true, the function failed and the error code is obtained from PTR_ERR().
*/
struct inode *ntfs_iget(struct super_block *sb, unsigned long mft_no)
{
struct inode *vi;
int err;
ntfs_attr na;
na.mft_no = mft_no;
na.type = AT_UNUSED;
na.name = NULL;
na.name_len = 0;
vi = iget5_locked(sb, mft_no, (test_t)ntfs_test_inode,
(set_t)ntfs_init_locked_inode, &na);
if (unlikely(!vi))
return ERR_PTR(-ENOMEM);
err = 0;
/* If this is a freshly allocated inode, need to read it now. */
if (vi->i_state & I_NEW) {
err = ntfs_read_locked_inode(vi);
unlock_new_inode(vi);
}
/*
* There is no point in keeping bad inodes around if the failure was
* due to ENOMEM. We want to be able to retry again later.
*/
if (unlikely(err == -ENOMEM)) {
iput(vi);
vi = ERR_PTR(err);
}
return vi;
}
/**
 * ntfs_attr_iget - obtain a struct inode corresponding to an attribute
 * @base_vi: vfs base inode containing the attribute
 * @type: attribute type
 * @name: Unicode name of the attribute (NULL if unnamed)
 * @name_len: length of @name in Unicode characters (0 if unnamed)
 *
 * Obtain the (fake) struct inode corresponding to the attribute specified by
 * @type, @name, and @name_len, which is present in the base mft record
 * specified by the vfs inode @base_vi.
 *
 * If the attribute inode is in the cache, it is just returned with an
 * increased reference count. Otherwise, a new struct inode is allocated and
 * initialized, and finally ntfs_read_locked_attr_inode() is called to read the
 * attribute and fill in the inode structure.
 *
 * Note, for index allocation attributes, you need to use ntfs_index_iget()
 * instead of ntfs_attr_iget() as working with indices is a lot more complex.
 *
 * Return the struct inode of the attribute inode on success. Check the return
 * value with IS_ERR() and if true, the function failed and the error code is
 * obtained from PTR_ERR().
 */
struct inode *ntfs_attr_iget(struct inode *base_vi, ATTR_TYPE type,
		ntfschar *name, u32 name_len)
{
	int err = 0;
	struct inode *vi;
	ntfs_attr na = {
		.mft_no = base_vi->i_ino,
		.type = type,
		.name = name,
		.name_len = name_len,
	};

	/* Indices must go through ntfs_index_iget() instead. */
	BUG_ON(type == AT_INDEX_ALLOCATION);

	vi = iget5_locked(base_vi->i_sb, na.mft_no, (test_t)ntfs_test_inode,
			(set_t)ntfs_init_locked_inode, &na);
	if (unlikely(!vi))
		return ERR_PTR(-ENOMEM);
	/* A freshly allocated inode has I_NEW set and must be read in now. */
	if (vi->i_state & I_NEW) {
		err = ntfs_read_locked_attr_inode(base_vi, vi);
		unlock_new_inode(vi);
	}
	/*
	 * Never cache a bad attribute inode; this guarantees no one else ever
	 * needs to check for bad attribute inodes.
	 */
	if (unlikely(err)) {
		iput(vi);
		vi = ERR_PTR(err);
	}
	return vi;
}
/**
 * ntfs_index_iget - obtain a struct inode corresponding to an index
 * @base_vi: vfs base inode containing the index related attributes
 * @name: Unicode name of the index
 * @name_len: length of @name in Unicode characters
 *
 * Obtain the (fake) struct inode corresponding to the index specified by @name
 * and @name_len, which is present in the base mft record specified by the vfs
 * inode @base_vi.
 *
 * If the index inode is in the cache, it is just returned with an increased
 * reference count. Otherwise, a new struct inode is allocated and
 * initialized, and finally ntfs_read_locked_index_inode() is called to read
 * the index related attributes and fill in the inode structure.
 *
 * Return the struct inode of the index inode on success. Check the return
 * value with IS_ERR() and if true, the function failed and the error code is
 * obtained from PTR_ERR().
 */
struct inode *ntfs_index_iget(struct inode *base_vi, ntfschar *name,
		u32 name_len)
{
	int err = 0;
	struct inode *vi;
	ntfs_attr na = {
		.mft_no = base_vi->i_ino,
		.type = AT_INDEX_ALLOCATION,
		.name = name,
		.name_len = name_len,
	};

	vi = iget5_locked(base_vi->i_sb, na.mft_no, (test_t)ntfs_test_inode,
			(set_t)ntfs_init_locked_inode, &na);
	if (unlikely(!vi))
		return ERR_PTR(-ENOMEM);
	/* A freshly allocated inode has I_NEW set and must be read in now. */
	if (vi->i_state & I_NEW) {
		err = ntfs_read_locked_index_inode(base_vi, vi);
		unlock_new_inode(vi);
	}
	/*
	 * Never cache a bad index inode; this guarantees no one else ever
	 * needs to check for bad index inodes.
	 */
	if (unlikely(err)) {
		iput(vi);
		vi = ERR_PTR(err);
	}
	return vi;
}
/* Allocate a big ntfs inode from its slab cache and return its vfs inode. */
struct inode *ntfs_alloc_big_inode(struct super_block *sb)
{
	ntfs_inode *ni;

	ntfs_debug("Entering.");
	ni = kmem_cache_alloc(ntfs_big_inode_cache, GFP_NOFS);
	if (unlikely(!ni)) {
		ntfs_error(sb, "Allocation of NTFS big inode structure failed.");
		return NULL;
	}
	ni->state = 0;
	return VFS_I(ni);
}
/* RCU callback: actually free the big ntfs inode once the grace period ends. */
static void ntfs_i_callback(struct rcu_head *head)
{
	struct inode *vi = container_of(head, struct inode, i_rcu);

	kmem_cache_free(ntfs_big_inode_cache, NTFS_I(vi));
}
/* Tear down a big ntfs inode and schedule its RCU-delayed freeing. */
void ntfs_destroy_big_inode(struct inode *inode)
{
	ntfs_inode *ni = NTFS_I(inode);

	ntfs_debug("Entering.");
	/* No mft record page may still be attached at destruction time. */
	BUG_ON(ni->page);
	/* The reference count must drop to exactly zero here. */
	BUG_ON(!atomic_dec_and_test(&ni->count));
	call_rcu(&inode->i_rcu, ntfs_i_callback);
}
/* Allocate a bare (extent) ntfs inode from its slab cache. */
static inline ntfs_inode *ntfs_alloc_extent_inode(void)
{
	ntfs_inode *ni;

	ntfs_debug("Entering.");
	ni = kmem_cache_alloc(ntfs_inode_cache, GFP_NOFS);
	if (unlikely(!ni)) {
		ntfs_error(NULL, "Allocation of NTFS inode structure failed.");
		return NULL;
	}
	ni->state = 0;
	return ni;
}
/* Tear down an extent ntfs inode and return it to its slab cache. */
static void ntfs_destroy_extent_inode(ntfs_inode *ni)
{
	ntfs_debug("Entering.");
	/* No mft record page may still be attached at destruction time. */
	BUG_ON(ni->page);
	/* The reference count must drop to exactly zero here. */
	BUG_ON(!atomic_dec_and_test(&ni->count));
	kmem_cache_free(ntfs_inode_cache, ni);
}
/*
* The attribute runlist lock has separate locking rules from the
* normal runlist lock, so split the two lock-classes:
*/
static struct lock_class_key attr_list_rl_lock_class;
/**
 * __ntfs_init_inode - initialize ntfs specific part of an inode
 * @sb: super block of mounted volume
 * @ni: freshly allocated ntfs inode which to initialize
 *
 * Initialize an ntfs inode to defaults.
 *
 * NOTE: ni->mft_no, ni->state, ni->type, ni->name, and ni->name_len are left
 * untouched. Make sure to initialize them elsewhere.
 *
 * This function performs no allocations and cannot fail.  (An earlier
 * version of this comment claimed a zero/-ENOMEM return, but the function
 * is void.)
 */
void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni)
{
	ntfs_debug("Entering.");
	rwlock_init(&ni->size_lock);
	ni->initialized_size = ni->allocated_size = 0;
	ni->seq_no = 0;
	/* One reference for the caller. */
	atomic_set(&ni->count, 1);
	ni->vol = NTFS_SB(sb);
	ntfs_init_runlist(&ni->runlist);
	mutex_init(&ni->mrec_lock);
	/* No mft record page is mapped yet. */
	ni->page = NULL;
	ni->page_ofs = 0;
	ni->attr_list_size = 0;
	ni->attr_list = NULL;
	ntfs_init_runlist(&ni->attr_list_rl);
	/*
	 * The attribute list runlist has different locking rules from the
	 * normal runlist, hence the separate lockdep class (declared above).
	 */
	lockdep_set_class(&ni->attr_list_rl.lock,
			&attr_list_rl_lock_class);
	ni->itype.index.block_size = 0;
	ni->itype.index.vcn_size = 0;
	ni->itype.index.collation_rule = 0;
	ni->itype.index.block_size_bits = 0;
	ni->itype.index.vcn_size_bits = 0;
	mutex_init(&ni->extent_lock);
	ni->nr_extents = 0;
	ni->ext.base_ntfs_ino = NULL;
}
/*
* Extent inodes get MFT-mapped in a nested way, while the base inode
* is still mapped. Teach this nesting to the lock validator by creating
* a separate class for nested inode's mrec_lock's:
*/
static struct lock_class_key extent_inode_mrec_lock_key;
/* Allocate and initialize an extent ntfs inode for mft record @mft_no. */
inline ntfs_inode *ntfs_new_extent_inode(struct super_block *sb,
		unsigned long mft_no)
{
	ntfs_inode *ni = ntfs_alloc_extent_inode();

	ntfs_debug("Entering.");
	if (unlikely(!ni))
		return NULL;
	__ntfs_init_inode(sb, ni);
	/*
	 * Extent mrec_locks nest inside the base inode's mrec_lock, so give
	 * them their own lockdep class (declared above).
	 */
	lockdep_set_class(&ni->mrec_lock, &extent_inode_mrec_lock_key);
	ni->mft_no = mft_no;
	ni->type = AT_UNUSED;
	ni->name = NULL;
	ni->name_len = 0;
	return ni;
}
/**
 * ntfs_is_extended_system_file - check if a file is in the $Extend directory
 * @ctx: initialized attribute search context
 *
 * Search all file name attributes in the inode described by the attribute
 * search context @ctx and check if any of the names are in the $Extend system
 * directory.
 *
 * Return values:
 *	   1: file is in $Extend directory
 *	   0: file is not in $Extend directory
 *	-errno: failed to determine if the file is in the $Extend directory
 */
static int ntfs_is_extended_system_file(ntfs_attr_search_ctx *ctx)
{
	int nr_links, err;
	/* Restart search. */
	ntfs_attr_reinit_search_ctx(ctx);
	/* Get number of hard links. */
	nr_links = le16_to_cpu(ctx->mrec->link_count);
	/* Loop through all hard links. */
	while (!(err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, 0, 0, NULL, 0,
			ctx))) {
		FILE_NAME_ATTR *file_name_attr;
		ATTR_RECORD *attr = ctx->attr;
		u8 *p, *p2;
		/* Each found name attribute accounts for one hard link. */
		nr_links--;
		/*
		 * Maximum sanity checking as we are called on an inode that
		 * we suspect might be corrupt.
		 */
		/* End of the attribute record; must lie inside the in-use
		 * portion of the mft record. */
		p = (u8*)attr + le32_to_cpu(attr->length);
		if (p < (u8*)ctx->mrec || (u8*)p > (u8*)ctx->mrec +
				le32_to_cpu(ctx->mrec->bytes_in_use)) {
err_corrupt_attr:
			ntfs_error(ctx->ntfs_ino->vol->sb, "Corrupt file name "
					"attribute. You should run chkdsk.");
			return -EIO;
		}
		if (attr->non_resident) {
			ntfs_error(ctx->ntfs_ino->vol->sb, "Non-resident file "
					"name. You should run chkdsk.");
			return -EIO;
		}
		if (attr->flags) {
			ntfs_error(ctx->ntfs_ino->vol->sb, "File name with "
					"invalid flags. You should run "
					"chkdsk.");
			return -EIO;
		}
		if (!(attr->data.resident.flags & RESIDENT_ATTR_IS_INDEXED)) {
			ntfs_error(ctx->ntfs_ino->vol->sb, "Unindexed file "
					"name. You should run chkdsk.");
			return -EIO;
		}
		file_name_attr = (FILE_NAME_ATTR*)((u8*)attr +
				le16_to_cpu(attr->data.resident.value_offset));
		/* Sanity check the value length against the record bounds. */
		p2 = (u8*)attr + le32_to_cpu(attr->data.resident.value_length);
		if (p2 < (u8*)attr || p2 > p)
			goto err_corrupt_attr;
		/* This attribute is ok, but is it in the $Extend directory? */
		if (MREF_LE(file_name_attr->parent_directory) == FILE_Extend)
			return 1;	/* YES, it's an extended system file. */
	}
	/* -ENOENT terminates the walk normally; anything else is an error. */
	if (unlikely(err != -ENOENT))
		return err;
	/* A leftover link count indicates a corrupt mft record. */
	if (unlikely(nr_links)) {
		ntfs_error(ctx->ntfs_ino->vol->sb, "Inode hard link count "
				"doesn't match number of name attributes. You "
				"should run chkdsk.");
		return -EIO;
	}
	return 0;	/* NO, it is not an extended system file. */
}
/**
 * ntfs_read_locked_inode - read an inode from its device
 * @vi: inode to read
 *
 * ntfs_read_locked_inode() is called from ntfs_iget() to read the inode
 * described by @vi into memory from the device.
 *
 * The only fields in @vi that we need to/can look at when the function is
 * called are i_sb, pointing to the mounted device's super block, and i_ino,
 * the number of the inode to load.
 *
 * ntfs_read_locked_inode() maps, pins and locks the mft record number i_ino
 * for reading and sets up the necessary @vi fields as well as initializing
 * the ntfs inode.
 *
 * Q: What locks are held when the function is called?
 * A: i_state has I_NEW set, hence the inode is locked, also
 *    i_count is set to 1, so it is not going to go away
 *    i_flags is set to 0 and we have no business touching it.  Only an ioctl()
 *    is allowed to write to them. We should of course be honouring them but
 *    we need to do that using the IS_* macros defined in include/linux/fs.h.
 *    In any case ntfs_read_locked_inode() has nothing to do with i_flags.
 *
 * Return 0 on success and -errno on error.  In the error case, the inode will
 * have had make_bad_inode() executed on it.
 */
static int ntfs_read_locked_inode(struct inode *vi)
{
	ntfs_volume *vol = NTFS_SB(vi->i_sb);
	ntfs_inode *ni;
	struct inode *bvi;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	STANDARD_INFORMATION *si;
	ntfs_attr_search_ctx *ctx;
	int err = 0;
	ntfs_debug("Entering for i_ino 0x%lx.", vi->i_ino);
	/* Setup the generic vfs inode parts now. */
	/*
	 * This is for checking whether an inode has changed w.r.t. a file so
	 * that the file can be updated if necessary (compare with f_version).
	 */
	vi->i_version = 1;
	vi->i_uid = vol->uid;
	vi->i_gid = vol->gid;
	vi->i_mode = 0;
	/*
	 * Initialize the ntfs specific part of @vi special casing
	 * FILE_MFT which we need to do at mount time.
	 */
	if (vi->i_ino != FILE_MFT)
		ntfs_init_big_inode(vi);
	ni = NTFS_I(vi);
	m = map_mft_record(ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(ni, m);
	if (!ctx) {
		err = -ENOMEM;
		goto unm_err_out;
	}
	/*
	 * Note: err is deliberately left 0 in the two checks below;
	 * unm_err_out converts a zero err into -EIO.
	 */
	if (!(m->flags & MFT_RECORD_IN_USE)) {
		ntfs_error(vi->i_sb, "Inode is not in use!");
		goto unm_err_out;
	}
	if (m->base_mft_record) {
		ntfs_error(vi->i_sb, "Inode is an extent inode!");
		goto unm_err_out;
	}
	/* Transfer information from mft record into vfs and ntfs inodes. */
	vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
	/*
	 * FIXME: Keep in mind that link_count is two for files which have both
	 * a long file name and a short file name as separate entries, so if
	 * we are hiding short file names this will be too high. Either we need
	 * to account for the short file names by subtracting them or we need
	 * to make sure we delete files even though i_nlink is not zero which
	 * might be tricky due to vfs interactions. Need to think about this
	 * some more when implementing the unlink command.
	 */
	set_nlink(vi, le16_to_cpu(m->link_count));
	/*
	 * FIXME: Reparse points can have the directory bit set even though
	 * they would be S_IFLNK. Need to deal with this further below when we
	 * implement reparse points / symbolic links but it will do for now.
	 * Also if not a directory, it could be something else, rather than
	 * a regular file. But again, will do for now.
	 */
	/* Everyone gets all permissions. */
	vi->i_mode |= S_IRWXUGO;
	/* If read-only, no one gets write permissions. */
	if (IS_RDONLY(vi))
		vi->i_mode &= ~S_IWUGO;
	if (m->flags & MFT_RECORD_IS_DIRECTORY) {
		vi->i_mode |= S_IFDIR;
		/*
		 * Apply the directory permissions mask set in the mount
		 * options.
		 */
		vi->i_mode &= ~vol->dmask;
		/* Things break without this kludge! */
		if (vi->i_nlink > 1)
			set_nlink(vi, 1);
	} else {
		vi->i_mode |= S_IFREG;
		/* Apply the file permissions mask set in the mount options. */
		vi->i_mode &= ~vol->fmask;
	}
	/*
	 * Find the standard information attribute in the mft record. At this
	 * stage we haven't setup the attribute list stuff yet, so this could
	 * in fact fail if the standard information is in an extent record, but
	 * I don't think this actually ever happens.
	 */
	err = ntfs_attr_lookup(AT_STANDARD_INFORMATION, NULL, 0, 0, 0, NULL, 0,
			ctx);
	if (unlikely(err)) {
		if (err == -ENOENT) {
			/*
			 * TODO: We should be performing a hot fix here (if the
			 * recover mount option is set) by creating a new
			 * attribute.
			 */
			ntfs_error(vi->i_sb, "$STANDARD_INFORMATION attribute "
					"is missing.");
		}
		goto unm_err_out;
	}
	a = ctx->attr;
	/* Get the standard information attribute value. */
	si = (STANDARD_INFORMATION*)((u8*)a +
			le16_to_cpu(a->data.resident.value_offset));
	/* Transfer information from the standard information into vi. */
	/*
	 * Note: The i_?times do not quite map perfectly onto the NTFS times,
	 * but they are close enough, and in the end it doesn't really matter
	 * that much...
	 */
	/*
	 * mtime is the last change of the data within the file. Not changed
	 * when only metadata is changed, e.g. a rename doesn't affect mtime.
	 */
	vi->i_mtime = ntfs2utc(si->last_data_change_time);
	/*
	 * ctime is the last change of the metadata of the file. This obviously
	 * always changes, when mtime is changed. ctime can be changed on its
	 * own, mtime is then not changed, e.g. when a file is renamed.
	 */
	vi->i_ctime = ntfs2utc(si->last_mft_change_time);
	/*
	 * Last access to the data within the file. Not changed during a rename
	 * for example but changed whenever the file is written to.
	 */
	vi->i_atime = ntfs2utc(si->last_access_time);
	/* Find the attribute list attribute if present. */
	ntfs_attr_reinit_search_ctx(ctx);
	err = ntfs_attr_lookup(AT_ATTRIBUTE_LIST, NULL, 0, 0, 0, NULL, 0, ctx);
	if (err) {
		if (unlikely(err != -ENOENT)) {
			ntfs_error(vi->i_sb, "Failed to lookup attribute list "
					"attribute.");
			goto unm_err_out;
		}
	} else /* if (!err) */ {
		/* The $MFT inode handles its attribute list at mount time. */
		if (vi->i_ino == FILE_MFT)
			goto skip_attr_list_load;
		ntfs_debug("Attribute list found in inode 0x%lx.", vi->i_ino);
		NInoSetAttrList(ni);
		a = ctx->attr;
		if (a->flags & ATTR_COMPRESSION_MASK) {
			ntfs_error(vi->i_sb, "Attribute list attribute is "
					"compressed.");
			goto unm_err_out;
		}
		if (a->flags & ATTR_IS_ENCRYPTED ||
				a->flags & ATTR_IS_SPARSE) {
			if (a->non_resident) {
				ntfs_error(vi->i_sb, "Non-resident attribute "
						"list attribute is encrypted/"
						"sparse.");
				goto unm_err_out;
			}
			ntfs_warning(vi->i_sb, "Resident attribute list "
					"attribute in inode 0x%lx is marked "
					"encrypted/sparse which is not true. "
					"However, Windows allows this and "
					"chkdsk does not detect or correct it "
					"so we will just ignore the invalid "
					"flags and pretend they are not set.",
					vi->i_ino);
		}
		/* Now allocate memory for the attribute list. */
		ni->attr_list_size = (u32)ntfs_attr_size(a);
		ni->attr_list = ntfs_malloc_nofs(ni->attr_list_size);
		if (!ni->attr_list) {
			ntfs_error(vi->i_sb, "Not enough memory to allocate "
					"buffer for attribute list.");
			err = -ENOMEM;
			goto unm_err_out;
		}
		if (a->non_resident) {
			NInoSetAttrListNonResident(ni);
			if (a->data.non_resident.lowest_vcn) {
				ntfs_error(vi->i_sb, "Attribute list has non "
						"zero lowest_vcn.");
				goto unm_err_out;
			}
			/*
			 * Setup the runlist. No need for locking as we have
			 * exclusive access to the inode at this time.
			 */
			ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol,
					a, NULL);
			if (IS_ERR(ni->attr_list_rl.rl)) {
				err = PTR_ERR(ni->attr_list_rl.rl);
				ni->attr_list_rl.rl = NULL;
				ntfs_error(vi->i_sb, "Mapping pairs "
						"decompression failed.");
				goto unm_err_out;
			}
			/* Now load the attribute list. */
			if ((err = load_attribute_list(vol, &ni->attr_list_rl,
					ni->attr_list, ni->attr_list_size,
					sle64_to_cpu(a->data.non_resident.
					initialized_size)))) {
				ntfs_error(vi->i_sb, "Failed to load "
						"attribute list attribute.");
				goto unm_err_out;
			}
		} else /* if (!a->non_resident) */ {
			/* The resident value must fit inside the mft record. */
			if ((u8*)a + le16_to_cpu(a->data.resident.value_offset)
					+ le32_to_cpu(
					a->data.resident.value_length) >
					(u8*)ctx->mrec + vol->mft_record_size) {
				ntfs_error(vi->i_sb, "Corrupt attribute list "
						"in inode.");
				goto unm_err_out;
			}
			/* Now copy the attribute list. */
			memcpy(ni->attr_list, (u8*)a + le16_to_cpu(
					a->data.resident.value_offset),
					le32_to_cpu(
					a->data.resident.value_length));
		}
	}
skip_attr_list_load:
	/*
	 * If an attribute list is present we now have the attribute list value
	 * in ntfs_ino->attr_list and it is ntfs_ino->attr_list_size bytes.
	 */
	if (S_ISDIR(vi->i_mode)) {
		loff_t bvi_size;
		ntfs_inode *bni;
		INDEX_ROOT *ir;
		u8 *ir_end, *index_end;
		/* It is a directory, find index root attribute. */
		ntfs_attr_reinit_search_ctx(ctx);
		err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE,
				0, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT) {
				// FIXME: File is corrupt! Hot-fix with empty
				// index root attribute if recovery option is
				// set.
				ntfs_error(vi->i_sb, "$INDEX_ROOT attribute "
						"is missing.");
			}
			goto unm_err_out;
		}
		a = ctx->attr;
		/* Set up the state. */
		if (unlikely(a->non_resident)) {
			ntfs_error(vol->sb, "$INDEX_ROOT attribute is not "
					"resident.");
			goto unm_err_out;
		}
		/* Ensure the attribute name is placed before the value. */
		if (unlikely(a->name_length && (le16_to_cpu(a->name_offset) >=
				le16_to_cpu(a->data.resident.value_offset)))) {
			ntfs_error(vol->sb, "$INDEX_ROOT attribute name is "
					"placed after the attribute value.");
			goto unm_err_out;
		}
		/*
		 * Compressed/encrypted index root just means that the newly
		 * created files in that directory should be created compressed/
		 * encrypted. However index root cannot be both compressed and
		 * encrypted.
		 */
		if (a->flags & ATTR_COMPRESSION_MASK)
			NInoSetCompressed(ni);
		if (a->flags & ATTR_IS_ENCRYPTED) {
			if (a->flags & ATTR_COMPRESSION_MASK) {
				ntfs_error(vi->i_sb, "Found encrypted and "
						"compressed attribute.");
				goto unm_err_out;
			}
			NInoSetEncrypted(ni);
		}
		if (a->flags & ATTR_IS_SPARSE)
			NInoSetSparse(ni);
		ir = (INDEX_ROOT*)((u8*)a +
				le16_to_cpu(a->data.resident.value_offset));
		ir_end = (u8*)ir + le32_to_cpu(a->data.resident.value_length);
		/* The index root value must fit inside the mft record. */
		if (ir_end > (u8*)ctx->mrec + vol->mft_record_size) {
			ntfs_error(vi->i_sb, "$INDEX_ROOT attribute is "
					"corrupt.");
			goto unm_err_out;
		}
		index_end = (u8*)&ir->index +
				le32_to_cpu(ir->index.index_length);
		if (index_end > ir_end) {
			ntfs_error(vi->i_sb, "Directory index is corrupt.");
			goto unm_err_out;
		}
		if (ir->type != AT_FILE_NAME) {
			ntfs_error(vi->i_sb, "Indexed attribute is not "
					"$FILE_NAME.");
			goto unm_err_out;
		}
		if (ir->collation_rule != COLLATION_FILE_NAME) {
			ntfs_error(vi->i_sb, "Index collation rule is not "
					"COLLATION_FILE_NAME.");
			goto unm_err_out;
		}
		ni->itype.index.collation_rule = ir->collation_rule;
		ni->itype.index.block_size = le32_to_cpu(ir->index_block_size);
		/* The index block size must be a power of two. */
		if (ni->itype.index.block_size &
				(ni->itype.index.block_size - 1)) {
			ntfs_error(vi->i_sb, "Index block size (%u) is not a "
					"power of two.",
					ni->itype.index.block_size);
			goto unm_err_out;
		}
		if (ni->itype.index.block_size > PAGE_CACHE_SIZE) {
			ntfs_error(vi->i_sb, "Index block size (%u) > "
					"PAGE_CACHE_SIZE (%ld) is not "
					"supported. Sorry.",
					ni->itype.index.block_size,
					PAGE_CACHE_SIZE);
			err = -EOPNOTSUPP;
			goto unm_err_out;
		}
		if (ni->itype.index.block_size < NTFS_BLOCK_SIZE) {
			ntfs_error(vi->i_sb, "Index block size (%u) < "
					"NTFS_BLOCK_SIZE (%i) is not "
					"supported. Sorry.",
					ni->itype.index.block_size,
					NTFS_BLOCK_SIZE);
			err = -EOPNOTSUPP;
			goto unm_err_out;
		}
		ni->itype.index.block_size_bits =
				ffs(ni->itype.index.block_size) - 1;
		/* Determine the size of a vcn in the directory index. */
		if (vol->cluster_size <= ni->itype.index.block_size) {
			ni->itype.index.vcn_size = vol->cluster_size;
			ni->itype.index.vcn_size_bits = vol->cluster_size_bits;
		} else {
			ni->itype.index.vcn_size = vol->sector_size;
			ni->itype.index.vcn_size_bits = vol->sector_size_bits;
		}
		/* Setup the index allocation attribute, even if not present. */
		NInoSetMstProtected(ni);
		ni->type = AT_INDEX_ALLOCATION;
		ni->name = I30;
		ni->name_len = 4;
		if (!(ir->index.flags & LARGE_INDEX)) {
			/* No index allocation. */
			vi->i_size = ni->initialized_size =
					ni->allocated_size = 0;
			/* We are done with the mft record, so we release it. */
			ntfs_attr_put_search_ctx(ctx);
			unmap_mft_record(ni);
			m = NULL;
			ctx = NULL;
			goto skip_large_dir_stuff;
		} /* LARGE_INDEX: Index allocation present. Setup state. */
		NInoSetIndexAllocPresent(ni);
		/* Find index allocation attribute. */
		ntfs_attr_reinit_search_ctx(ctx);
		err = ntfs_attr_lookup(AT_INDEX_ALLOCATION, I30, 4,
				CASE_SENSITIVE, 0, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				ntfs_error(vi->i_sb, "$INDEX_ALLOCATION "
						"attribute is not present but "
						"$INDEX_ROOT indicated it is.");
			else
				ntfs_error(vi->i_sb, "Failed to lookup "
						"$INDEX_ALLOCATION "
						"attribute.");
			goto unm_err_out;
		}
		a = ctx->attr;
		if (!a->non_resident) {
			ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
					"is resident.");
			goto unm_err_out;
		}
		/*
		 * Ensure the attribute name is placed before the mapping pairs
		 * array.
		 */
		if (unlikely(a->name_length && (le16_to_cpu(a->name_offset) >=
				le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset)))) {
			ntfs_error(vol->sb, "$INDEX_ALLOCATION attribute name "
					"is placed after the mapping pairs "
					"array.");
			goto unm_err_out;
		}
		if (a->flags & ATTR_IS_ENCRYPTED) {
			ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
					"is encrypted.");
			goto unm_err_out;
		}
		if (a->flags & ATTR_IS_SPARSE) {
			ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
					"is sparse.");
			goto unm_err_out;
		}
		if (a->flags & ATTR_COMPRESSION_MASK) {
			ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
					"is compressed.");
			goto unm_err_out;
		}
		if (a->data.non_resident.lowest_vcn) {
			ntfs_error(vi->i_sb, "First extent of "
					"$INDEX_ALLOCATION attribute has non "
					"zero lowest_vcn.");
			goto unm_err_out;
		}
		vi->i_size = sle64_to_cpu(a->data.non_resident.data_size);
		ni->initialized_size = sle64_to_cpu(
				a->data.non_resident.initialized_size);
		ni->allocated_size = sle64_to_cpu(
				a->data.non_resident.allocated_size);
		/*
		 * We are done with the mft record, so we release it. Otherwise
		 * we would deadlock in ntfs_attr_iget().
		 */
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(ni);
		m = NULL;
		ctx = NULL;
		/* Get the index bitmap attribute inode. */
		bvi = ntfs_attr_iget(vi, AT_BITMAP, I30, 4);
		if (IS_ERR(bvi)) {
			ntfs_error(vi->i_sb, "Failed to get bitmap attribute.");
			err = PTR_ERR(bvi);
			goto unm_err_out;
		}
		bni = NTFS_I(bvi);
		if (NInoCompressed(bni) || NInoEncrypted(bni) ||
				NInoSparse(bni)) {
			ntfs_error(vi->i_sb, "$BITMAP attribute is compressed "
					"and/or encrypted and/or sparse.");
			goto iput_unm_err_out;
		}
		/* Consistency check bitmap size vs. index allocation size. */
		bvi_size = i_size_read(bvi);
		/* One bitmap bit covers one index block. */
		if ((bvi_size << 3) < (vi->i_size >>
				ni->itype.index.block_size_bits)) {
			ntfs_error(vi->i_sb, "Index bitmap too small (0x%llx) "
					"for index allocation (0x%llx).",
					bvi_size << 3, vi->i_size);
			goto iput_unm_err_out;
		}
		/* No longer need the bitmap attribute inode. */
		iput(bvi);
skip_large_dir_stuff:
		/* Setup the operations for this inode. */
		vi->i_op = &ntfs_dir_inode_ops;
		vi->i_fop = &ntfs_dir_ops;
	} else {
		/* It is a file. */
		ntfs_attr_reinit_search_ctx(ctx);
		/* Setup the data attribute, even if not present. */
		ni->type = AT_DATA;
		ni->name = NULL;
		ni->name_len = 0;
		/* Find first extent of the unnamed data attribute. */
		err = ntfs_attr_lookup(AT_DATA, NULL, 0, 0, 0, NULL, 0, ctx);
		if (unlikely(err)) {
			vi->i_size = ni->initialized_size =
					ni->allocated_size = 0;
			if (err != -ENOENT) {
				ntfs_error(vi->i_sb, "Failed to lookup $DATA "
						"attribute.");
				goto unm_err_out;
			}
			/*
			 * FILE_Secure does not have an unnamed $DATA
			 * attribute, so we special case it here.
			 */
			if (vi->i_ino == FILE_Secure)
				goto no_data_attr_special_case;
			/*
			 * Most if not all the system files in the $Extend
			 * system directory do not have unnamed data
			 * attributes so we need to check if the parent
			 * directory of the file is FILE_Extend and if it is
			 * ignore this error. To do this we need to get the
			 * name of this inode from the mft record as the name
			 * contains the back reference to the parent directory.
			 */
			if (ntfs_is_extended_system_file(ctx) > 0)
				goto no_data_attr_special_case;
			// FIXME: File is corrupt! Hot-fix with empty data
			// attribute if recovery option is set.
			ntfs_error(vi->i_sb, "$DATA attribute is missing.");
			goto unm_err_out;
		}
		a = ctx->attr;
		/* Setup the state. */
		if (a->flags & (ATTR_COMPRESSION_MASK | ATTR_IS_SPARSE)) {
			if (a->flags & ATTR_COMPRESSION_MASK) {
				NInoSetCompressed(ni);
				if (vol->cluster_size > 4096) {
					ntfs_error(vi->i_sb, "Found "
							"compressed data but "
							"compression is "
							"disabled due to "
							"cluster size (%i) > "
							"4kiB.",
							vol->cluster_size);
					goto unm_err_out;
				}
				if ((a->flags & ATTR_COMPRESSION_MASK)
						!= ATTR_IS_COMPRESSED) {
					ntfs_error(vi->i_sb, "Found unknown "
							"compression method "
							"or corrupt file.");
					goto unm_err_out;
				}
			}
			if (a->flags & ATTR_IS_SPARSE)
				NInoSetSparse(ni);
		}
		if (a->flags & ATTR_IS_ENCRYPTED) {
			if (NInoCompressed(ni)) {
				ntfs_error(vi->i_sb, "Found encrypted and "
						"compressed data.");
				goto unm_err_out;
			}
			NInoSetEncrypted(ni);
		}
		if (a->non_resident) {
			NInoSetNonResident(ni);
			if (NInoCompressed(ni) || NInoSparse(ni)) {
				/* Only a 16-cluster (2^4) compression unit is
				 * supported for compressed files. */
				if (NInoCompressed(ni) && a->data.non_resident.
						compression_unit != 4) {
					ntfs_error(vi->i_sb, "Found "
							"non-standard "
							"compression unit (%u "
							"instead of 4). "
							"Cannot handle this.",
							a->data.non_resident.
							compression_unit);
					err = -EOPNOTSUPP;
					goto unm_err_out;
				}
				if (a->data.non_resident.compression_unit) {
					ni->itype.compressed.block_size = 1U <<
							(a->data.non_resident.
							compression_unit +
							vol->cluster_size_bits);
					ni->itype.compressed.block_size_bits =
							ffs(ni->itype.
							compressed.
							block_size) - 1;
					ni->itype.compressed.block_clusters =
							1U << a->data.
							non_resident.
							compression_unit;
				} else {
					ni->itype.compressed.block_size = 0;
					ni->itype.compressed.block_size_bits =
							0;
					ni->itype.compressed.block_clusters =
							0;
				}
				ni->itype.compressed.size = sle64_to_cpu(
						a->data.non_resident.
						compressed_size);
			}
			if (a->data.non_resident.lowest_vcn) {
				ntfs_error(vi->i_sb, "First extent of $DATA "
						"attribute has non zero "
						"lowest_vcn.");
				goto unm_err_out;
			}
			vi->i_size = sle64_to_cpu(
					a->data.non_resident.data_size);
			ni->initialized_size = sle64_to_cpu(
					a->data.non_resident.initialized_size);
			ni->allocated_size = sle64_to_cpu(
					a->data.non_resident.allocated_size);
		} else { /* Resident attribute. */
			vi->i_size = ni->initialized_size = le32_to_cpu(
					a->data.resident.value_length);
			ni->allocated_size = le32_to_cpu(a->length) -
					le16_to_cpu(
					a->data.resident.value_offset);
			if (vi->i_size > ni->allocated_size) {
				ntfs_error(vi->i_sb, "Resident data attribute "
						"is corrupt (size exceeds "
						"allocation).");
				goto unm_err_out;
			}
		}
no_data_attr_special_case:
		/* We are done with the mft record, so we release it. */
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(ni);
		m = NULL;
		ctx = NULL;
		/* Setup the operations for this inode. */
		vi->i_op = &ntfs_file_inode_ops;
		vi->i_fop = &ntfs_file_ops;
	}
	if (NInoMstProtected(ni))
		vi->i_mapping->a_ops = &ntfs_mst_aops;
	else
		vi->i_mapping->a_ops = &ntfs_aops;
	/*
	 * The number of 512-byte blocks used on disk (for stat). This is in so
	 * far inaccurate as it doesn't account for any named streams or other
	 * special non-resident attributes, but that is how Windows works, too,
	 * so we are at least consistent with Windows, if not entirely
	 * consistent with the Linux Way. Doing it the Linux Way would cause a
	 * significant slowdown as it would involve iterating over all
	 * attributes in the mft record and adding the allocated/compressed
	 * sizes of all non-resident attributes present to give us the Linux
	 * correct size that should go into i_blocks (after division by 512).
	 */
	if (S_ISREG(vi->i_mode) && (NInoCompressed(ni) || NInoSparse(ni)))
		vi->i_blocks = ni->itype.compressed.size >> 9;
	else
		vi->i_blocks = ni->allocated_size >> 9;
	ntfs_debug("Done.");
	return 0;
iput_unm_err_out:
	iput(bvi);
unm_err_out:
	/* Corruption paths arrive here with err still 0; map that to -EIO. */
	if (!err)
		err = -EIO;
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(ni);
err_out:
	ntfs_error(vol->sb, "Failed with error code %i.  Marking corrupt "
			"inode 0x%lx as bad.  Run chkdsk.", err, vi->i_ino);
	make_bad_inode(vi);
	/* Transient errors do not flag the volume as having errors. */
	if (err != -EOPNOTSUPP && err != -ENOMEM)
		NVolSetErrors(vol);
	return err;
}
/**
* ntfs_read_locked_attr_inode - read an attribute inode from its base inode
* @base_vi: base inode
* @vi: attribute inode to read
*
* ntfs_read_locked_attr_inode() is called from ntfs_attr_iget() to read the
* attribute inode described by @vi into memory from the base mft record
* described by @base_ni.
*
* ntfs_read_locked_attr_inode() maps, pins and locks the base inode for
* reading and looks up the attribute described by @vi before setting up the
* necessary fields in @vi as well as initializing the ntfs inode.
*
* Q: What locks are held when the function is called?
* A: i_state has I_NEW set, hence the inode is locked, also
* i_count is set to 1, so it is not going to go away
*
* Return 0 on success and -errno on error. In the error case, the inode will
* have had make_bad_inode() executed on it.
*
* Note this cannot be called for AT_INDEX_ALLOCATION.
*/
static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
{
	ntfs_volume *vol = NTFS_SB(vi->i_sb);
	ntfs_inode *ni, *base_ni;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx;
	int err = 0;
	ntfs_debug("Entering for i_ino 0x%lx.", vi->i_ino);
	ntfs_init_big_inode(vi);
	ni = NTFS_I(vi);
	base_ni = NTFS_I(base_vi);
	/* Just mirror the values from the base inode. */
	vi->i_version = base_vi->i_version;
	vi->i_uid = base_vi->i_uid;
	vi->i_gid = base_vi->i_gid;
	set_nlink(vi, base_vi->i_nlink);
	vi->i_mtime = base_vi->i_mtime;
	vi->i_ctime = base_vi->i_ctime;
	vi->i_atime = base_vi->i_atime;
	vi->i_generation = ni->seq_no = base_ni->seq_no;
	/* Set inode type to zero but preserve permissions. */
	vi->i_mode = base_vi->i_mode & ~S_IFMT;
	/* Map the base mft record so we can look up the attribute in it. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (!ctx) {
		err = -ENOMEM;
		goto unm_err_out;
	}
	/* Find the attribute. */
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err))
		goto unm_err_out;
	a = ctx->attr;
	/* Validate compression/sparseness; only unnamed $DATA may be
	   compressed, and only with the standard compression unit. */
	if (a->flags & (ATTR_COMPRESSION_MASK | ATTR_IS_SPARSE)) {
		if (a->flags & ATTR_COMPRESSION_MASK) {
			NInoSetCompressed(ni);
			/* Compression is only valid on unnamed $DATA. */
			if ((ni->type != AT_DATA) || (ni->type == AT_DATA &&
					ni->name_len)) {
				ntfs_error(vi->i_sb, "Found compressed "
						"non-data or named data "
						"attribute. Please report "
						"you saw this message to "
						"linux-ntfs-dev@lists."
						"sourceforge.net");
				goto unm_err_out;
			}
			if (vol->cluster_size > 4096) {
				ntfs_error(vi->i_sb, "Found compressed "
						"attribute but compression is "
						"disabled due to cluster size "
						"(%i) > 4kiB.",
						vol->cluster_size);
				goto unm_err_out;
			}
			if ((a->flags & ATTR_COMPRESSION_MASK) !=
					ATTR_IS_COMPRESSED) {
				ntfs_error(vi->i_sb, "Found unknown "
						"compression method.");
				goto unm_err_out;
			}
		}
		/*
		 * The compressed/sparse flag set in an index root just means
		 * to compress all files.
		 */
		if (NInoMstProtected(ni) && ni->type != AT_INDEX_ROOT) {
			ntfs_error(vi->i_sb, "Found mst protected attribute "
					"but the attribute is %s. Please "
					"report you saw this message to "
					"linux-ntfs-dev@lists.sourceforge.net",
					NInoCompressed(ni) ? "compressed" :
					"sparse");
			goto unm_err_out;
		}
		if (a->flags & ATTR_IS_SPARSE)
			NInoSetSparse(ni);
	}
	/* Encryption excludes compression and is only valid on $DATA. */
	if (a->flags & ATTR_IS_ENCRYPTED) {
		if (NInoCompressed(ni)) {
			ntfs_error(vi->i_sb, "Found encrypted and compressed "
					"data.");
			goto unm_err_out;
		}
		/*
		 * The encryption flag set in an index root just means to
		 * encrypt all files.
		 */
		if (NInoMstProtected(ni) && ni->type != AT_INDEX_ROOT) {
			ntfs_error(vi->i_sb, "Found mst protected attribute "
					"but the attribute is encrypted. "
					"Please report you saw this message "
					"to linux-ntfs-dev@lists.sourceforge."
					"net");
			goto unm_err_out;
		}
		if (ni->type != AT_DATA) {
			ntfs_error(vi->i_sb, "Found encrypted non-data "
					"attribute.");
			goto unm_err_out;
		}
		NInoSetEncrypted(ni);
	}
	if (!a->non_resident) {
		/* Ensure the attribute name is placed before the value. */
		if (unlikely(a->name_length && (le16_to_cpu(a->name_offset) >=
				le16_to_cpu(a->data.resident.value_offset)))) {
			ntfs_error(vol->sb, "Attribute name is placed after "
					"the attribute value.");
			goto unm_err_out;
		}
		/* Mst protected attributes are always non-resident. */
		if (NInoMstProtected(ni)) {
			ntfs_error(vi->i_sb, "Found mst protected attribute "
					"but the attribute is resident. "
					"Please report you saw this message to "
					"linux-ntfs-dev@lists.sourceforge.net");
			goto unm_err_out;
		}
		/* Resident: sizes come straight from the value in the record. */
		vi->i_size = ni->initialized_size = le32_to_cpu(
				a->data.resident.value_length);
		ni->allocated_size = le32_to_cpu(a->length) -
				le16_to_cpu(a->data.resident.value_offset);
		if (vi->i_size > ni->allocated_size) {
			ntfs_error(vi->i_sb, "Resident attribute is corrupt "
					"(size exceeds allocation).");
			goto unm_err_out;
		}
	} else {
		NInoSetNonResident(ni);
		/*
		 * Ensure the attribute name is placed before the mapping pairs
		 * array.
		 */
		if (unlikely(a->name_length && (le16_to_cpu(a->name_offset) >=
				le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset)))) {
			ntfs_error(vol->sb, "Attribute name is placed after "
					"the mapping pairs array.");
			goto unm_err_out;
		}
		if (NInoCompressed(ni) || NInoSparse(ni)) {
			/* Only the standard compression unit of 4 is handled. */
			if (NInoCompressed(ni) && a->data.non_resident.
					compression_unit != 4) {
				ntfs_error(vi->i_sb, "Found non-standard "
						"compression unit (%u instead "
						"of 4). Cannot handle this.",
						a->data.non_resident.
						compression_unit);
				err = -EOPNOTSUPP;
				goto unm_err_out;
			}
			/* compression_unit is a log2 of clusters per block. */
			if (a->data.non_resident.compression_unit) {
				ni->itype.compressed.block_size = 1U <<
						(a->data.non_resident.
						compression_unit +
						vol->cluster_size_bits);
				ni->itype.compressed.block_size_bits =
						ffs(ni->itype.compressed.
						block_size) - 1;
				ni->itype.compressed.block_clusters = 1U <<
						a->data.non_resident.
						compression_unit;
			} else {
				ni->itype.compressed.block_size = 0;
				ni->itype.compressed.block_size_bits = 0;
				ni->itype.compressed.block_clusters = 0;
			}
			ni->itype.compressed.size = sle64_to_cpu(
					a->data.non_resident.compressed_size);
		}
		/* Only the first extent may be looked up here. */
		if (a->data.non_resident.lowest_vcn) {
			ntfs_error(vi->i_sb, "First extent of attribute has "
					"non-zero lowest_vcn.");
			goto unm_err_out;
		}
		vi->i_size = sle64_to_cpu(a->data.non_resident.data_size);
		ni->initialized_size = sle64_to_cpu(
				a->data.non_resident.initialized_size);
		ni->allocated_size = sle64_to_cpu(
				a->data.non_resident.allocated_size);
	}
	if (NInoMstProtected(ni))
		vi->i_mapping->a_ops = &ntfs_mst_aops;
	else
		vi->i_mapping->a_ops = &ntfs_aops;
	/* i_blocks is in 512-byte units, hence the shift by 9. */
	if ((NInoCompressed(ni) || NInoSparse(ni)) && ni->type != AT_INDEX_ROOT)
		vi->i_blocks = ni->itype.compressed.size >> 9;
	else
		vi->i_blocks = ni->allocated_size >> 9;
	/*
	 * Make sure the base inode does not go away and attach it to the
	 * attribute inode.
	 */
	igrab(base_vi);
	ni->ext.base_ntfs_ino = base_ni;
	/* nr_extents == -1 marks an attribute inode holding its base inode. */
	ni->nr_extents = -1;
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
unm_err_out:
	/* Jumps here without err set indicate corruption; default to -EIO. */
	if (!err)
		err = -EIO;
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
err_out:
	ntfs_error(vol->sb, "Failed with error code %i while reading attribute "
			"inode (mft_no 0x%lx, type 0x%x, name_len %i). "
			"Marking corrupt inode and base inode 0x%lx as bad. "
			"Run chkdsk.", err, vi->i_ino, ni->type, ni->name_len,
			base_vi->i_ino);
	make_bad_inode(vi);
	/* Only flag volume errors for on-disk corruption, not OOM. */
	if (err != -ENOMEM)
		NVolSetErrors(vol);
	return err;
}
/**
* ntfs_read_locked_index_inode - read an index inode from its base inode
* @base_vi: base inode
* @vi: index inode to read
*
* ntfs_read_locked_index_inode() is called from ntfs_index_iget() to read the
* index inode described by @vi into memory from the base mft record described
 * by @base_vi.
*
* ntfs_read_locked_index_inode() maps, pins and locks the base inode for
* reading and looks up the attributes relating to the index described by @vi
* before setting up the necessary fields in @vi as well as initializing the
* ntfs inode.
*
* Note, index inodes are essentially attribute inodes (NInoAttr() is true)
* with the attribute type set to AT_INDEX_ALLOCATION. Apart from that, they
* are setup like directory inodes since directories are a special case of
 * indices so they need to be treated in much the same way. Most importantly,
* for small indices the index allocation attribute might not actually exist.
* However, the index root attribute always exists but this does not need to
* have an inode associated with it and this is why we define a new inode type
* index. Also, like for directories, we need to have an attribute inode for
* the bitmap attribute corresponding to the index allocation attribute and we
* can store this in the appropriate field of the inode, just like we do for
* normal directory inodes.
*
* Q: What locks are held when the function is called?
* A: i_state has I_NEW set, hence the inode is locked, also
* i_count is set to 1, so it is not going to go away
*
* Return 0 on success and -errno on error. In the error case, the inode will
* have had make_bad_inode() executed on it.
*/
static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
{
loff_t bvi_size;
ntfs_volume *vol = NTFS_SB(vi->i_sb);
ntfs_inode *ni, *base_ni, *bni;
struct inode *bvi;
MFT_RECORD *m;
ATTR_RECORD *a;
ntfs_attr_search_ctx *ctx;
INDEX_ROOT *ir;
u8 *ir_end, *index_end;
int err = 0;
ntfs_debug("Entering for i_ino 0x%lx.", vi->i_ino);
ntfs_init_big_inode(vi);
ni = NTFS_I(vi);
base_ni = NTFS_I(base_vi);
/* Just mirror the values from the base inode. */
vi->i_version = base_vi->i_version;
vi->i_uid = base_vi->i_uid;
vi->i_gid = base_vi->i_gid;
set_nlink(vi, base_vi->i_nlink);
vi->i_mtime = base_vi->i_mtime;
vi->i_ctime = base_vi->i_ctime;
vi->i_atime = base_vi->i_atime;
vi->i_generation = ni->seq_no = base_ni->seq_no;
/* Set inode type to zero but preserve permissions. */
vi->i_mode = base_vi->i_mode & ~S_IFMT;
/* Map the mft record for the base inode. */
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (!ctx) {
err = -ENOMEM;
goto unm_err_out;
}
/* Find the index root attribute. */
err = ntfs_attr_lookup(AT_INDEX_ROOT, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
ntfs_error(vi->i_sb, "$INDEX_ROOT attribute is "
"missing.");
goto unm_err_out;
}
a = ctx->attr;
/* Set up the state. */
if (unlikely(a->non_resident)) {
ntfs_error(vol->sb, "$INDEX_ROOT attribute is not resident.");
goto unm_err_out;
}
/* Ensure the attribute name is placed before the value. */
if (unlikely(a->name_length && (le16_to_cpu(a->name_offset) >=
le16_to_cpu(a->data.resident.value_offset)))) {
ntfs_error(vol->sb, "$INDEX_ROOT attribute name is placed "
"after the attribute value.");
goto unm_err_out;
}
/*
* Compressed/encrypted/sparse index root is not allowed, except for
* directories of course but those are not dealt with here.
*/
if (a->flags & (ATTR_COMPRESSION_MASK | ATTR_IS_ENCRYPTED |
ATTR_IS_SPARSE)) {
ntfs_error(vi->i_sb, "Found compressed/encrypted/sparse index "
"root attribute.");
goto unm_err_out;
}
ir = (INDEX_ROOT*)((u8*)a + le16_to_cpu(a->data.resident.value_offset));
ir_end = (u8*)ir + le32_to_cpu(a->data.resident.value_length);
if (ir_end > (u8*)ctx->mrec + vol->mft_record_size) {
ntfs_error(vi->i_sb, "$INDEX_ROOT attribute is corrupt.");
goto unm_err_out;
}
index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
if (index_end > ir_end) {
ntfs_error(vi->i_sb, "Index is corrupt.");
goto unm_err_out;
}
if (ir->type) {
ntfs_error(vi->i_sb, "Index type is not 0 (type is 0x%x).",
le32_to_cpu(ir->type));
goto unm_err_out;
}
ni->itype.index.collation_rule = ir->collation_rule;
ntfs_debug("Index collation rule is 0x%x.",
le32_to_cpu(ir->collation_rule));
ni->itype.index.block_size = le32_to_cpu(ir->index_block_size);
if (!is_power_of_2(ni->itype.index.block_size)) {
ntfs_error(vi->i_sb, "Index block size (%u) is not a power of "
"two.", ni->itype.index.block_size);
goto unm_err_out;
}
if (ni->itype.index.block_size > PAGE_CACHE_SIZE) {
ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE "
"(%ld) is not supported. Sorry.",
ni->itype.index.block_size, PAGE_CACHE_SIZE);
err = -EOPNOTSUPP;
goto unm_err_out;
}
if (ni->itype.index.block_size < NTFS_BLOCK_SIZE) {
ntfs_error(vi->i_sb, "Index block size (%u) < NTFS_BLOCK_SIZE "
"(%i) is not supported. Sorry.",
ni->itype.index.block_size, NTFS_BLOCK_SIZE);
err = -EOPNOTSUPP;
goto unm_err_out;
}
ni->itype.index.block_size_bits = ffs(ni->itype.index.block_size) - 1;
/* Determine the size of a vcn in the index. */
if (vol->cluster_size <= ni->itype.index.block_size) {
ni->itype.index.vcn_size = vol->cluster_size;
ni->itype.index.vcn_size_bits = vol->cluster_size_bits;
} else {
ni->itype.index.vcn_size = vol->sector_size;
ni->itype.index.vcn_size_bits = vol->sector_size_bits;
}
/* Check for presence of index allocation attribute. */
if (!(ir->index.flags & LARGE_INDEX)) {
/* No index allocation. */
vi->i_size = ni->initialized_size = ni->allocated_size = 0;
/* We are done with the mft record, so we release it. */
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
m = NULL;
ctx = NULL;
goto skip_large_index_stuff;
} /* LARGE_INDEX: Index allocation present. Setup state. */
NInoSetIndexAllocPresent(ni);
/* Find index allocation attribute. */
ntfs_attr_reinit_search_ctx(ctx);
err = ntfs_attr_lookup(AT_INDEX_ALLOCATION, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is "
"not present but $INDEX_ROOT "
"indicated it is.");
else
ntfs_error(vi->i_sb, "Failed to lookup "
"$INDEX_ALLOCATION attribute.");
goto unm_err_out;
}
a = ctx->attr;
if (!a->non_resident) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is "
"resident.");
goto unm_err_out;
}
/*
* Ensure the attribute name is placed before the mapping pairs array.
*/
if (unlikely(a->name_length && (le16_to_cpu(a->name_offset) >=
le16_to_cpu(
a->data.non_resident.mapping_pairs_offset)))) {
ntfs_error(vol->sb, "$INDEX_ALLOCATION attribute name is "
"placed after the mapping pairs array.");
goto unm_err_out;
}
if (a->flags & ATTR_IS_ENCRYPTED) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is "
"encrypted.");
goto unm_err_out;
}
if (a->flags & ATTR_IS_SPARSE) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is sparse.");
goto unm_err_out;
}
if (a->flags & ATTR_COMPRESSION_MASK) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is "
"compressed.");
goto unm_err_out;
}
if (a->data.non_resident.lowest_vcn) {
ntfs_error(vi->i_sb, "First extent of $INDEX_ALLOCATION "
"attribute has non zero lowest_vcn.");
goto unm_err_out;
}
vi->i_size = sle64_to_cpu(a->data.non_resident.data_size);
ni->initialized_size = sle64_to_cpu(
a->data.non_resident.initialized_size);
ni->allocated_size = sle64_to_cpu(a->data.non_resident.allocated_size);
/*
* We are done with the mft record, so we release it. Otherwise
* we would deadlock in ntfs_attr_iget().
*/
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
m = NULL;
ctx = NULL;
/* Get the index bitmap attribute inode. */
bvi = ntfs_attr_iget(base_vi, AT_BITMAP, ni->name, ni->name_len);
if (IS_ERR(bvi)) {
ntfs_error(vi->i_sb, "Failed to get bitmap attribute.");
err = PTR_ERR(bvi);
goto unm_err_out;
}
bni = NTFS_I(bvi);
if (NInoCompressed(bni) || NInoEncrypted(bni) ||
NInoSparse(bni)) {
ntfs_error(vi->i_sb, "$BITMAP attribute is compressed and/or "
"encrypted and/or sparse.");
goto iput_unm_err_out;
}
/* Consistency check bitmap size vs. index allocation size. */
bvi_size = i_size_read(bvi);
if ((bvi_size << 3) < (vi->i_size >> ni->itype.index.block_size_bits)) {
ntfs_error(vi->i_sb, "Index bitmap too small (0x%llx) for "
"index allocation (0x%llx).", bvi_size << 3,
vi->i_size);
goto iput_unm_err_out;
}
iput(bvi);
skip_large_index_stuff:
/* Setup the operations for this index inode. */
vi->i_op = NULL;
vi->i_fop = NULL;
vi->i_mapping->a_ops = &ntfs_mst_aops;
vi->i_blocks = ni->allocated_size >> 9;
/*
* Make sure the base inode doesn't go away and attach it to the
* index inode.
*/
igrab(base_vi);
ni->ext.base_ntfs_ino = base_ni;
ni->nr_extents = -1;
ntfs_debug("Done.");
return 0;
iput_unm_err_out:
iput(bvi);
unm_err_out:
if (!err)
err = -EIO;
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
err_out:
ntfs_error(vi->i_sb, "Failed with error code %i while reading index "
"inode (mft_no 0x%lx, name_len %i.", err, vi->i_ino,
ni->name_len);
make_bad_inode(vi);
if (err != -EOPNOTSUPP && err != -ENOMEM)
NVolSetErrors(vol);
return err;
}
/*
* The MFT inode has special locking, so teach the lock validator
* about this by splitting off the locking rules of the MFT from
* the locking rules of other inodes. The MFT inode can never be
* accessed from the VFS side (or even internally), only by the
* map_mft functions.
*/
static struct lock_class_key mft_ni_runlist_lock_key, mft_ni_mrec_lock_key;
/**
* ntfs_read_inode_mount - special read_inode for mount time use only
* @vi: inode to read
*
* Read inode FILE_MFT at mount time, only called with super_block lock
* held from within the read_super() code path.
*
* This function exists because when it is called the page cache for $MFT/$DATA
* is not initialized and hence we cannot get at the contents of mft records
* by calling map_mft_record*().
*
* Further it needs to cope with the circular references problem, i.e. cannot
* load any attributes other than $ATTRIBUTE_LIST until $DATA is loaded, because
* we do not know where the other extent mft records are yet and again, because
* we cannot call map_mft_record*() yet. Obviously this applies only when an
* attribute list is actually present in $MFT inode.
*
* We solve these problems by starting with the $DATA attribute before anything
* else and iterating using ntfs_attr_lookup($DATA) over all extents. As each
* extent is found, we ntfs_mapping_pairs_decompress() including the implied
* ntfs_runlists_merge(). Each step of the iteration necessarily provides
* sufficient information for the next step to complete.
*
* This should work but there are two possible pit falls (see inline comments
* below), but only time will tell if they are real pits or just smoke...
*/
int ntfs_read_inode_mount(struct inode *vi)
{
	VCN next_vcn, last_vcn, highest_vcn;
	s64 block;
	struct super_block *sb = vi->i_sb;
	ntfs_volume *vol = NTFS_SB(sb);
	struct buffer_head *bh;
	ntfs_inode *ni;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx;
	unsigned int i, nr_blocks;
	int err;
	ntfs_debug("Entering.");
	/* Initialize the ntfs specific part of @vi. */
	ntfs_init_big_inode(vi);
	ni = NTFS_I(vi);
	/* Setup the data attribute. It is special as it is mst protected. */
	NInoSetNonResident(ni);
	NInoSetMstProtected(ni);
	NInoSetSparseDisabled(ni);
	ni->type = AT_DATA;
	ni->name = NULL;
	ni->name_len = 0;
	/*
	 * This sets up our little cheat allowing us to reuse the async read io
	 * completion handler for directories.
	 */
	ni->itype.index.block_size = vol->mft_record_size;
	ni->itype.index.block_size_bits = vol->mft_record_size_bits;
	/* Very important! Needed to be able to call map_mft_record*(). */
	vol->mft_ino = vi;
	/* Allocate enough memory to read the first mft record. */
	if (vol->mft_record_size > 64 * 1024) {
		ntfs_error(sb, "Unsupported mft record size %i (max 64kiB).",
				vol->mft_record_size);
		goto err_out;
	}
	/* Read at least one device block even for tiny mft records. */
	i = vol->mft_record_size;
	if (i < sb->s_blocksize)
		i = sb->s_blocksize;
	m = (MFT_RECORD*)ntfs_malloc_nofs(i);
	if (!m) {
		ntfs_error(sb, "Failed to allocate buffer for $MFT record 0.");
		goto err_out;
	}
	/* Determine the first block of the $MFT/$DATA attribute. */
	block = vol->mft_lcn << vol->cluster_size_bits >>
			sb->s_blocksize_bits;
	nr_blocks = vol->mft_record_size >> sb->s_blocksize_bits;
	if (!nr_blocks)
		nr_blocks = 1;
	/*
	 * Load $MFT/$DATA's first mft record via raw buffer heads; the page
	 * cache for $MFT is not usable yet at this point of the mount.
	 */
	for (i = 0; i < nr_blocks; i++) {
		bh = sb_bread(sb, block++);
		if (!bh) {
			ntfs_error(sb, "Device read failed.");
			goto err_out;
		}
		memcpy((char*)m + (i << sb->s_blocksize_bits), bh->b_data,
				sb->s_blocksize);
		brelse(bh);
	}
	/* Apply the mst fixups. */
	if (post_read_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size)) {
		/* FIXME: Try to use the $MFTMirr now. */
		ntfs_error(sb, "MST fixup failed. $MFT is corrupt.");
		goto err_out;
	}
	/* Need this to sanity check attribute list references to $MFT. */
	vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
	/* Provides readpage() and sync_page() for map_mft_record(). */
	vi->i_mapping->a_ops = &ntfs_mst_aops;
	ctx = ntfs_attr_get_search_ctx(ni, m);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;
	}
	/* Find the attribute list attribute if present. */
	err = ntfs_attr_lookup(AT_ATTRIBUTE_LIST, NULL, 0, 0, 0, NULL, 0, ctx);
	if (err) {
		/* -ENOENT just means $MFT has no attribute list. */
		if (unlikely(err != -ENOENT)) {
			ntfs_error(sb, "Failed to lookup attribute list "
					"attribute. You should run chkdsk.");
			goto put_err_out;
		}
	} else /* if (!err) */ {
		ATTR_LIST_ENTRY *al_entry, *next_al_entry;
		u8 *al_end;
		static const char *es = " Not allowed. $MFT is corrupt. "
				"You should run chkdsk.";
		ntfs_debug("Attribute list attribute found in $MFT.");
		NInoSetAttrList(ni);
		a = ctx->attr;
		if (a->flags & ATTR_COMPRESSION_MASK) {
			ntfs_error(sb, "Attribute list attribute is "
					"compressed.%s", es);
			goto put_err_out;
		}
		if (a->flags & ATTR_IS_ENCRYPTED ||
				a->flags & ATTR_IS_SPARSE) {
			if (a->non_resident) {
				ntfs_error(sb, "Non-resident attribute list "
						"attribute is encrypted/"
						"sparse.%s", es);
				goto put_err_out;
			}
			ntfs_warning(sb, "Resident attribute list attribute "
					"in $MFT system file is marked "
					"encrypted/sparse which is not true. "
					"However, Windows allows this and "
					"chkdsk does not detect or correct it "
					"so we will just ignore the invalid "
					"flags and pretend they are not set.");
		}
		/* Now allocate memory for the attribute list. */
		ni->attr_list_size = (u32)ntfs_attr_size(a);
		ni->attr_list = ntfs_malloc_nofs(ni->attr_list_size);
		if (!ni->attr_list) {
			ntfs_error(sb, "Not enough memory to allocate buffer "
					"for attribute list.");
			goto put_err_out;
		}
		if (a->non_resident) {
			NInoSetAttrListNonResident(ni);
			if (a->data.non_resident.lowest_vcn) {
				ntfs_error(sb, "Attribute list has non zero "
						"lowest_vcn. $MFT is corrupt. "
						"You should run chkdsk.");
				goto put_err_out;
			}
			/* Setup the runlist. */
			ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol,
					a, NULL);
			if (IS_ERR(ni->attr_list_rl.rl)) {
				err = PTR_ERR(ni->attr_list_rl.rl);
				ni->attr_list_rl.rl = NULL;
				ntfs_error(sb, "Mapping pairs decompression "
						"failed with error code %i.",
						-err);
				goto put_err_out;
			}
			/* Now load the attribute list. */
			if ((err = load_attribute_list(vol, &ni->attr_list_rl,
					ni->attr_list, ni->attr_list_size,
					sle64_to_cpu(a->data.
					non_resident.initialized_size)))) {
				ntfs_error(sb, "Failed to load attribute list "
						"attribute with error code %i.",
						-err);
				goto put_err_out;
			}
		} else /* if (!ctx.attr->non_resident) */ {
			/* Resident: bounds check value against the record. */
			if ((u8*)a + le16_to_cpu(
					a->data.resident.value_offset) +
					le32_to_cpu(
					a->data.resident.value_length) >
					(u8*)ctx->mrec + vol->mft_record_size) {
				ntfs_error(sb, "Corrupt attribute list "
						"attribute.");
				goto put_err_out;
			}
			/* Now copy the attribute list. */
			memcpy(ni->attr_list, (u8*)a + le16_to_cpu(
					a->data.resident.value_offset),
					le32_to_cpu(
					a->data.resident.value_length));
		}
		/* The attribute list is now setup in memory. */
		/*
		 * FIXME: I don't know if this case is actually possible.
		 * According to logic it is not possible but I have seen too
		 * many weird things in MS software to rely on logic... Thus we
		 * perform a manual search and make sure the first $MFT/$DATA
		 * extent is in the base inode. If it is not we abort with an
		 * error and if we ever see a report of this error we will need
		 * to do some magic in order to have the necessary mft record
		 * loaded and in the right place in the page cache. But
		 * hopefully logic will prevail and this never happens...
		 */
		al_entry = (ATTR_LIST_ENTRY*)ni->attr_list;
		al_end = (u8*)al_entry + ni->attr_list_size;
		for (;; al_entry = next_al_entry) {
			/* Out of bounds check. */
			if ((u8*)al_entry < ni->attr_list ||
					(u8*)al_entry > al_end)
				goto em_put_err_out;
			/* Catch the end of the attribute list. */
			if ((u8*)al_entry == al_end)
				goto em_put_err_out;
			if (!al_entry->length)
				goto em_put_err_out;
			/* +6 covers the fields read below before 'length'. */
			if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
					le16_to_cpu(al_entry->length) > al_end)
				goto em_put_err_out;
			next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
					le16_to_cpu(al_entry->length));
			/* Entries are type-ordered; past AT_DATA means missed. */
			if (le32_to_cpu(al_entry->type) > le32_to_cpu(AT_DATA))
				goto em_put_err_out;
			if (AT_DATA != al_entry->type)
				continue;
			/* We want an unnamed attribute. */
			if (al_entry->name_length)
				goto em_put_err_out;
			/* Want the first entry, i.e. lowest_vcn == 0. */
			if (al_entry->lowest_vcn)
				goto em_put_err_out;
			/* First entry has to be in the base mft record. */
			if (MREF_LE(al_entry->mft_reference) != vi->i_ino) {
				/* MFT references do not match, logic fails. */
				ntfs_error(sb, "BUG: The first $DATA extent "
						"of $MFT is not in the base "
						"mft record. Please report "
						"you saw this message to "
						"linux-ntfs-dev@lists."
						"sourceforge.net");
				goto put_err_out;
			} else {
				/* Sequence numbers must match. */
				if (MSEQNO_LE(al_entry->mft_reference) !=
						ni->seq_no)
					goto em_put_err_out;
				/* Got it. All is ok. We can stop now. */
				break;
			}
		}
	}
	ntfs_attr_reinit_search_ctx(ctx);
	/* Now load all attribute extents. */
	a = NULL;
	next_vcn = last_vcn = highest_vcn = 0;
	while (!(err = ntfs_attr_lookup(AT_DATA, NULL, 0, 0, next_vcn, NULL, 0,
			ctx))) {
		runlist_element *nrl;
		/* Cache the current attribute. */
		a = ctx->attr;
		/* $MFT must be non-resident. */
		if (!a->non_resident) {
			ntfs_error(sb, "$MFT must be non-resident but a "
					"resident extent was found. $MFT is "
					"corrupt. Run chkdsk.");
			goto put_err_out;
		}
		/* $MFT must be uncompressed and unencrypted. */
		if (a->flags & ATTR_COMPRESSION_MASK ||
				a->flags & ATTR_IS_ENCRYPTED ||
				a->flags & ATTR_IS_SPARSE) {
			ntfs_error(sb, "$MFT must be uncompressed, "
					"non-sparse, and unencrypted but a "
					"compressed/sparse/encrypted extent "
					"was found. $MFT is corrupt. Run "
					"chkdsk.");
			goto put_err_out;
		}
		/*
		 * Decompress the mapping pairs array of this extent and merge
		 * the result into the existing runlist. No need for locking
		 * as we have exclusive access to the inode at this time and we
		 * are a mount in progress task, too.
		 */
		nrl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
		if (IS_ERR(nrl)) {
			ntfs_error(sb, "ntfs_mapping_pairs_decompress() "
					"failed with error code %ld. $MFT is "
					"corrupt.", PTR_ERR(nrl));
			goto put_err_out;
		}
		ni->runlist.rl = nrl;
		/* Are we in the first extent? */
		if (!next_vcn) {
			if (a->data.non_resident.lowest_vcn) {
				ntfs_error(sb, "First extent of $DATA "
						"attribute has non zero "
						"lowest_vcn. $MFT is corrupt. "
						"You should run chkdsk.");
				goto put_err_out;
			}
			/* Get the last vcn in the $DATA attribute. */
			last_vcn = sle64_to_cpu(
					a->data.non_resident.allocated_size)
					>> vol->cluster_size_bits;
			/* Fill in the inode size. */
			vi->i_size = sle64_to_cpu(
					a->data.non_resident.data_size);
			ni->initialized_size = sle64_to_cpu(
					a->data.non_resident.initialized_size);
			ni->allocated_size = sle64_to_cpu(
					a->data.non_resident.allocated_size);
			/*
			 * Verify the number of mft records does not exceed
			 * 2^32 - 1.
			 */
			if ((vi->i_size >> vol->mft_record_size_bits) >=
					(1ULL << 32)) {
				ntfs_error(sb, "$MFT is too big! Aborting.");
				goto put_err_out;
			}
			/*
			 * We have got the first extent of the runlist for
			 * $MFT which means it is now relatively safe to call
			 * the normal ntfs_read_inode() function.
			 * Complete reading the inode, this will actually
			 * re-read the mft record for $MFT, this time entering
			 * it into the page cache with which we complete the
			 * kick start of the volume. It should be safe to do
			 * this now as the first extent of $MFT/$DATA is
			 * already known and we would hope that we don't need
			 * further extents in order to find the other
			 * attributes belonging to $MFT. Only time will tell if
			 * this is really the case. If not we will have to play
			 * magic at this point, possibly duplicating a lot of
			 * ntfs_read_inode() at this point. We will need to
			 * ensure we do enough of its work to be able to call
			 * ntfs_read_inode() on extents of $MFT/$DATA. But lets
			 * hope this never happens...
			 */
			ntfs_read_locked_inode(vi);
			if (is_bad_inode(vi)) {
				ntfs_error(sb, "ntfs_read_inode() of $MFT "
						"failed. BUG or corrupt $MFT. "
						"Run chkdsk and if no errors "
						"are found, please report you "
						"saw this message to "
						"linux-ntfs-dev@lists."
						"sourceforge.net");
				ntfs_attr_put_search_ctx(ctx);
				/* Revert to the safe super operations. */
				ntfs_free(m);
				return -1;
			}
			/*
			 * Re-initialize some specifics about $MFT's inode as
			 * ntfs_read_inode() will have set up the default ones.
			 */
			/* Set uid and gid to root. */
			vi->i_uid = GLOBAL_ROOT_UID;
			vi->i_gid = GLOBAL_ROOT_GID;
			/* Regular file. No access for anyone. */
			vi->i_mode = S_IFREG;
			/* No VFS initiated operations allowed for $MFT. */
			vi->i_op = &ntfs_empty_inode_ops;
			vi->i_fop = &ntfs_empty_file_ops;
		}
		/* Get the lowest vcn for the next extent. */
		highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		next_vcn = highest_vcn + 1;
		/* Only one extent or error, which we catch below. */
		if (next_vcn <= 0)
			break;
		/* Avoid endless loops due to corruption. */
		if (next_vcn < sle64_to_cpu(
				a->data.non_resident.lowest_vcn)) {
			ntfs_error(sb, "$MFT has corrupt attribute list "
					"attribute. Run chkdsk.");
			goto put_err_out;
		}
	}
	/* The extent walk must have ended with "no more extents". */
	if (err != -ENOENT) {
		ntfs_error(sb, "Failed to lookup $MFT/$DATA attribute extent. "
				"$MFT is corrupt. Run chkdsk.");
		goto put_err_out;
	}
	if (!a) {
		ntfs_error(sb, "$MFT/$DATA attribute not found. $MFT is "
				"corrupt. Run chkdsk.");
		goto put_err_out;
	}
	/* The runlist must cover the whole attribute up to the last vcn. */
	if (highest_vcn && highest_vcn != last_vcn - 1) {
		ntfs_error(sb, "Failed to load the complete runlist for "
				"$MFT/$DATA. Driver bug or corrupt $MFT. "
				"Run chkdsk.");
		ntfs_debug("highest_vcn = 0x%llx, last_vcn - 1 = 0x%llx",
				(unsigned long long)highest_vcn,
				(unsigned long long)last_vcn - 1);
		goto put_err_out;
	}
	ntfs_attr_put_search_ctx(ctx);
	ntfs_debug("Done.");
	ntfs_free(m);
	/*
	 * Split the locking rules of the MFT inode from the
	 * locking rules of other inodes:
	 */
	lockdep_set_class(&ni->runlist.lock, &mft_ni_runlist_lock_key);
	lockdep_set_class(&ni->mrec_lock, &mft_ni_mrec_lock_key);
	return 0;
em_put_err_out:
	ntfs_error(sb, "Couldn't find first extent of $DATA attribute in "
			"attribute list. $MFT is corrupt. Run chkdsk.");
put_err_out:
	ntfs_attr_put_search_ctx(ctx);
err_out:
	ntfs_error(sb, "Failed. Marking inode as bad.");
	make_bad_inode(vi);
	ntfs_free(m);
	return -1;
}
/*
 * __ntfs_clear_inode - free the memory attached to an ntfs inode
 * @ni:	ntfs inode whose runlists, attribute list, and name to free
 *
 * Common teardown used by both extent inode destruction and big inode
 * eviction.  Takes each runlist lock for writing before freeing it.
 */
static void __ntfs_clear_inode(ntfs_inode *ni)
{
	/* Free all allocated memory. */
	down_write(&ni->runlist.lock);
	if (ni->runlist.rl) {
		ntfs_free(ni->runlist.rl);
		ni->runlist.rl = NULL;
	}
	up_write(&ni->runlist.lock);
	if (ni->attr_list) {
		ntfs_free(ni->attr_list);
		ni->attr_list = NULL;
	}
	down_write(&ni->attr_list_rl.lock);
	if (ni->attr_list_rl.rl) {
		ntfs_free(ni->attr_list_rl.rl);
		ni->attr_list_rl.rl = NULL;
	}
	up_write(&ni->attr_list_rl.lock);
	/* Names equal to the shared I30 constant are never freed. */
	if (ni->name_len && ni->name != I30) {
		/* Catch bugs... */
		BUG_ON(!ni->name);
		kfree(ni->name);
	}
}
/*
 * ntfs_clear_extent_inode - free an extent ntfs inode
 * @ni:	extent ntfs inode to destroy
 *
 * Must only be called on extent inodes (nr_extents == -1, not attribute
 * inodes); frees all attached memory and destroys the inode itself.
 */
void ntfs_clear_extent_inode(ntfs_inode *ni)
{
	ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
	/* Extent inodes are never attribute inodes... */
	BUG_ON(NInoAttr(ni));
	/* ...and always carry the extent marker value of -1. */
	BUG_ON(ni->nr_extents != -1);
#ifdef NTFS_RW
	if (NInoDirty(ni)) {
		/* Only complain if the base inode is not already bad. */
		if (!is_bad_inode(VFS_I(ni->ext.base_ntfs_ino)))
			ntfs_error(ni->vol->sb, "Clearing dirty extent inode! "
					"Losing data! This is a BUG!!!");
		// FIXME: Do something!!!
	}
#endif /* NTFS_RW */
	__ntfs_clear_inode(ni);
	/* Bye, bye... */
	ntfs_destroy_extent_inode(ni);
}
/**
* ntfs_evict_big_inode - clean up the ntfs specific part of an inode
* @vi: vfs inode pending annihilation
*
 * When the VFS is going to remove an inode from memory, ntfs_evict_big_inode()
* is called, which deallocates all memory belonging to the NTFS specific part
* of the inode and returns.
*
* If the MFT record is dirty, we commit it before doing anything else.
*/
void ntfs_evict_big_inode(struct inode *vi)
{
	ntfs_inode *ni = NTFS_I(vi);

	/* Throw away all cached pages and detach the inode from the VFS. */
	truncate_inode_pages(&vi->i_data, 0);
	clear_inode(vi);
#ifdef NTFS_RW
	if (NInoDirty(ni)) {
		bool bad_before_commit = is_bad_inode(vi);

		/* Committing the inode also commits all extent inodes. */
		ntfs_commit_inode(vi);
		if (!bad_before_commit &&
				(is_bad_inode(vi) || NInoDirty(ni))) {
			ntfs_error(vi->i_sb, "Failed to commit dirty inode "
					"0x%lx. Losing data!", vi->i_ino);
			// FIXME: Do something!!!
		}
	}
#endif /* NTFS_RW */
	/* No need to lock at this stage as no one else has a reference. */
	if (ni->nr_extents > 0) {
		int idx;

		for (idx = 0; idx < ni->nr_extents; idx++)
			ntfs_clear_extent_inode(ni->ext.extent_ntfs_inos[idx]);
		kfree(ni->ext.extent_ntfs_inos);
	}
	__ntfs_clear_inode(ni);
	/* An attribute inode (nr_extents == -1) pins its base inode; drop
	   that reference now. */
	if (NInoAttr(ni) && ni->nr_extents == -1) {
		iput(VFS_I(ni->ext.base_ntfs_ino));
		ni->nr_extents = 0;
		ni->ext.base_ntfs_ino = NULL;
	}
}
/**
* ntfs_show_options - show mount options in /proc/mounts
* @sf: seq_file in which to write our mount options
* @root: root of the mounted tree whose mount options to display
*
* Called by the VFS once for each mounted ntfs volume when someone reads
* /proc/mounts in order to display the NTFS specific mount options of each
* mount. The mount options of fs specified by @root are written to the seq file
* @sf and success is returned.
*/
int ntfs_show_options(struct seq_file *sf, struct dentry *root)
{
	ntfs_volume *vol = NTFS_SB(root->d_sb);
	int idx;

	/* Ownership and permission mask options. */
	seq_printf(sf, ",uid=%i", from_kuid_munged(&init_user_ns, vol->uid));
	seq_printf(sf, ",gid=%i", from_kgid_munged(&init_user_ns, vol->gid));
	if (vol->fmask != vol->dmask) {
		/* Distinct file/directory masks are reported separately. */
		seq_printf(sf, ",fmask=0%o", vol->fmask);
		seq_printf(sf, ",dmask=0%o", vol->dmask);
	} else {
		seq_printf(sf, ",umask=0%o", vol->fmask);
	}
	seq_printf(sf, ",nls=%s", vol->nls_map->charset);
	/* Boolean volume flags, shown only when set (or unset for sparse). */
	if (NVolCaseSensitive(vol))
		seq_printf(sf, ",case_sensitive");
	if (NVolShowSystemFiles(vol))
		seq_printf(sf, ",show_sys_files");
	if (!NVolSparseEnabled(vol))
		seq_printf(sf, ",disable_sparse");
	/* Report every error-handling behaviour flagged in on_errors. */
	for (idx = 0; on_errors_arr[idx].val; idx++) {
		if (on_errors_arr[idx].val & vol->on_errors)
			seq_printf(sf, ",errors=%s", on_errors_arr[idx].str);
	}
	seq_printf(sf, ",mft_zone_multiplier=%i", vol->mft_zone_multiplier);
	return 0;
}
#ifdef NTFS_RW
/* Common error-message tail used when metadata is left inconsistent. */
static const char *es = " Leaving inconsistent metadata. Unmount and run "
		"chkdsk.";
/**
* ntfs_truncate - called when the i_size of an ntfs inode is changed
* @vi: inode for which the i_size was changed
*
* We only support i_size changes for normal files at present, i.e. not
* compressed and not encrypted. This is enforced in ntfs_setattr(), see
* below.
*
* The kernel guarantees that @vi is a regular file (S_ISREG() is true) and
* that the change is allowed.
*
* This implies for us that @vi is a file inode rather than a directory, index,
* or attribute inode as well as that @vi is a base inode.
*
* Returns 0 on success or -errno on error.
*
* Called with ->i_mutex held.
*/
int ntfs_truncate(struct inode *vi)
{
	s64 new_size, old_size, nr_freed, new_alloc_size, old_alloc_size;
	VCN highest_vcn;
	unsigned long flags;
	ntfs_inode *base_ni, *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	const char *te = " Leaving file length out of sync with i_size.";
	int err, mp_size, size_change, alloc_change;
	u32 attr_len;

	ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
	BUG_ON(NInoAttr(ni));
	BUG_ON(S_ISDIR(vi->i_mode));
	BUG_ON(NInoMstProtected(ni));
	BUG_ON(ni->nr_extents < 0);
	/* Restart point after a resident attribute is made non-resident. */
retry_truncate:
	/*
	 * Lock the runlist for writing and map the mft record to ensure it is
	 * safe to mess with the attribute runlist and sizes.
	 */
	down_write(&ni->runlist.lock);
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		ntfs_error(vi->i_sb, "Failed to map mft record for inode 0x%lx "
				"(error code %d).%s", vi->i_ino, err, te);
		ctx = NULL;
		m = NULL;
		goto old_bad_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		ntfs_error(vi->i_sb, "Failed to allocate a search context for "
				"inode 0x%lx (not enough memory).%s",
				vi->i_ino, te);
		err = -ENOMEM;
		goto old_bad_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT) {
			ntfs_error(vi->i_sb, "Open attribute is missing from "
					"mft record. Inode 0x%lx is corrupt. "
					"Run chkdsk.%s", vi->i_ino, te);
			err = -EIO;
		} else
			ntfs_error(vi->i_sb, "Failed to lookup attribute in "
					"inode 0x%lx (error code %d).%s",
					vi->i_ino, err, te);
		goto old_bad_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	/*
	 * The i_size of the vfs inode is the new size for the attribute value.
	 */
	new_size = i_size_read(vi);
	/* The current size of the attribute value is the old size. */
	old_size = ntfs_attr_size(a);
	/* Calculate the new allocated size. */
	if (NInoNonResident(ni))
		new_alloc_size = (new_size + vol->cluster_size - 1) &
				~(s64)vol->cluster_size_mask;
	else
		new_alloc_size = (new_size + 7) & ~7;
	/* The current allocated size is the old allocated size. */
	read_lock_irqsave(&ni->size_lock, flags);
	old_alloc_size = ni->allocated_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	/*
	 * The change in the file size. This will be 0 if no change, >0 if the
	 * size is growing, and <0 if the size is shrinking.
	 */
	size_change = -1;
	if (new_size - old_size >= 0) {
		size_change = 1;
		if (new_size == old_size)
			size_change = 0;
	}
	/* As above for the allocated size. */
	alloc_change = -1;
	if (new_alloc_size - old_alloc_size >= 0) {
		alloc_change = 1;
		if (new_alloc_size == old_alloc_size)
			alloc_change = 0;
	}
	/*
	 * If neither the size nor the allocation are being changed there is
	 * nothing to do.
	 */
	if (!size_change && !alloc_change)
		goto unm_done;
	/* If the size is changing, check if new size is allowed in $AttrDef. */
	if (size_change) {
		err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
		if (unlikely(err)) {
			if (err == -ERANGE) {
				ntfs_error(vol->sb, "Truncate would cause the "
						"inode 0x%lx to %simum size "
						"for its attribute type "
						"(0x%x). Aborting truncate.",
						vi->i_ino,
						new_size > old_size ? "exceed "
						"the max" : "go under the min",
						le32_to_cpu(ni->type));
				err = -EFBIG;
			} else {
				ntfs_error(vol->sb, "Inode 0x%lx has unknown "
						"attribute type 0x%x. "
						"Aborting truncate.",
						vi->i_ino,
						le32_to_cpu(ni->type));
				err = -EIO;
			}
			/* Reset the vfs inode size to the old size. */
			i_size_write(vi, old_size);
			goto err_out;
		}
	}
	if (NInoCompressed(ni) || NInoEncrypted(ni)) {
		ntfs_warning(vi->i_sb, "Changes in inode size are not "
				"supported yet for %s files, ignoring.",
				NInoCompressed(ni) ? "compressed" :
				"encrypted");
		err = -EOPNOTSUPP;
		goto bad_out;
	}
	if (a->non_resident)
		goto do_non_resident_truncate;
	BUG_ON(NInoNonResident(ni));
	/* Resize the attribute record to best fit the new attribute size. */
	if (new_size < vol->mft_record_size &&
			!ntfs_resident_attr_value_resize(m, a, new_size)) {
		/* The resize succeeded! */
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		write_lock_irqsave(&ni->size_lock, flags);
		/* Update the sizes in the ntfs inode and all is done. */
		ni->allocated_size = le32_to_cpu(a->length) -
				le16_to_cpu(a->data.resident.value_offset);
		/*
		 * Note ntfs_resident_attr_value_resize() has already done any
		 * necessary data clearing in the attribute record. When the
		 * file is being shrunk vmtruncate() will already have cleared
		 * the top part of the last partial page, i.e. since this is
		 * the resident case this is the page with index 0. However,
		 * when the file is being expanded, the page cache page data
		 * between the old data_size, i.e. old_size, and the new_size
		 * has not been zeroed. Fortunately, we do not need to zero it
		 * either since on one hand it will either already be zero due
		 * to both readpage and writepage clearing partial page data
		 * beyond i_size in which case there is nothing to do or in the
		 * case of the file being mmap()ped at the same time, POSIX
		 * specifies that the behaviour is unspecified thus we do not
		 * have to do anything. This means that in our implementation
		 * in the rare case that the file is mmap()ped and a write
		 * occurred into the mmap()ped region just beyond the file size
		 * and writepage has not yet been called to write out the page
		 * (which would clear the area beyond the file size) and we now
		 * extend the file size to incorporate this dirty region
		 * outside the file size, a write of the page would result in
		 * this data being written to disk instead of being cleared.
		 * Given both POSIX and the Linux mmap(2) man page specify that
		 * this corner case is undefined, we choose to leave it like
		 * that as this is much simpler for us as we cannot lock the
		 * relevant page now since we are holding too many ntfs locks
		 * which would result in a lock reversal deadlock.
		 */
		ni->initialized_size = new_size;
		write_unlock_irqrestore(&ni->size_lock, flags);
		goto unm_done;
	}
	/* If the above resize failed, this must be an attribute extension. */
	BUG_ON(size_change < 0);
	/*
	 * We have to drop all the locks so we can call
	 * ntfs_attr_make_non_resident(). This could be optimised by try-
	 * locking the first page cache page and only if that fails dropping
	 * the locks, locking the page, and redoing all the locking and
	 * lookups. While this would be a huge optimisation, it is not worth
	 * it as this is definitely a slow code path as it only ever can happen
	 * once for any given file.
	 */
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	up_write(&ni->runlist.lock);
	/*
	 * Not enough space in the mft record, try to make the attribute
	 * non-resident and if successful restart the truncation process.
	 */
	err = ntfs_attr_make_non_resident(ni, old_size);
	if (likely(!err))
		goto retry_truncate;
	/*
	 * Could not make non-resident. If this is due to this not being
	 * permitted for this attribute type or there not being enough space,
	 * try to make other attributes non-resident. Otherwise fail.
	 */
	if (unlikely(err != -EPERM && err != -ENOSPC)) {
		ntfs_error(vol->sb, "Cannot truncate inode 0x%lx, attribute "
				"type 0x%x, because the conversion from "
				"resident to non-resident attribute failed "
				"with error code %i.", vi->i_ino,
				(unsigned)le32_to_cpu(ni->type), err);
		if (err != -ENOMEM)
			err = -EIO;
		goto conv_err_out;
	}
	/* TODO: Not implemented from here, abort. */
	if (err == -ENOSPC)
		ntfs_error(vol->sb, "Not enough space in the mft record/on "
				"disk for the non-resident attribute value. "
				"This case is not implemented yet.");
	else /* if (err == -EPERM) */
		ntfs_error(vol->sb, "This attribute type may not be "
				"non-resident. This case is not implemented "
				"yet.");
	err = -EOPNOTSUPP;
	goto conv_err_out;
#if 0
	// TODO: Attempt to make other attributes non-resident.
	if (!err)
		goto do_resident_extend;
	/*
	 * Both the attribute list attribute and the standard information
	 * attribute must remain in the base inode. Thus, if this is one of
	 * these attributes, we have to try to move other attributes out into
	 * extent mft records instead.
	 */
	if (ni->type == AT_ATTRIBUTE_LIST ||
			ni->type == AT_STANDARD_INFORMATION) {
		// TODO: Attempt to move other attributes into extent mft
		// records.
		err = -EOPNOTSUPP;
		if (!err)
			goto do_resident_extend;
		goto err_out;
	}
	// TODO: Attempt to move this attribute to an extent mft record, but
	// only if it is not already the only attribute in an mft record in
	// which case there would be nothing to gain.
	err = -EOPNOTSUPP;
	if (!err)
		goto do_resident_extend;
	/* There is nothing we can do to make enough space. )-: */
	goto err_out;
#endif
do_non_resident_truncate:
	BUG_ON(!NInoNonResident(ni));
	if (alloc_change < 0) {
		highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		if (highest_vcn > 0 &&
				old_alloc_size >> vol->cluster_size_bits >
				highest_vcn + 1) {
			/*
			 * This attribute has multiple extents. Not yet
			 * supported.
			 */
			ntfs_error(vol->sb, "Cannot truncate inode 0x%lx, "
					"attribute type 0x%x, because the "
					"attribute is highly fragmented (it "
					"consists of multiple extents) and "
					"this case is not implemented yet.",
					vi->i_ino,
					(unsigned)le32_to_cpu(ni->type));
			err = -EOPNOTSUPP;
			goto bad_out;
		}
	}
	/*
	 * If the size is shrinking, need to reduce the initialized_size and
	 * the data_size before reducing the allocation.
	 */
	if (size_change < 0) {
		/*
		 * Make the valid size smaller (i_size is already up-to-date).
		 */
		write_lock_irqsave(&ni->size_lock, flags);
		if (new_size < ni->initialized_size) {
			ni->initialized_size = new_size;
			a->data.non_resident.initialized_size =
					cpu_to_sle64(new_size);
		}
		a->data.non_resident.data_size = cpu_to_sle64(new_size);
		write_unlock_irqrestore(&ni->size_lock, flags);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		/* If the allocated size is not changing, we are done. */
		if (!alloc_change)
			goto unm_done;
		/*
		 * If the size is shrinking it makes no sense for the
		 * allocation to be growing.
		 */
		BUG_ON(alloc_change > 0);
	} else /* if (size_change >= 0) */ {
		/*
		 * The file size is growing or staying the same but the
		 * allocation can be shrinking, growing or staying the same.
		 */
		if (alloc_change > 0) {
			/*
			 * We need to extend the allocation and possibly update
			 * the data size. If we are updating the data size,
			 * since we are not touching the initialized_size we do
			 * not need to worry about the actual data on disk.
			 * And as far as the page cache is concerned, there
			 * will be no pages beyond the old data size and any
			 * partial region in the last page between the old and
			 * new data size (or the end of the page if the new
			 * data size is outside the page) does not need to be
			 * modified as explained above for the resident
			 * attribute truncate case. To do this, we simply drop
			 * the locks we hold and leave all the work to our
			 * friendly helper ntfs_attr_extend_allocation().
			 */
			ntfs_attr_put_search_ctx(ctx);
			unmap_mft_record(base_ni);
			up_write(&ni->runlist.lock);
			err = ntfs_attr_extend_allocation(ni, new_size,
					size_change > 0 ? new_size : -1, -1);
			/*
			 * ntfs_attr_extend_allocation() will have done error
			 * output already.
			 */
			goto done;
		}
		if (!alloc_change)
			goto alloc_done;
	}
	/* alloc_change < 0 */
	/* Free the clusters. */
	nr_freed = ntfs_cluster_free(ni, new_alloc_size >>
			vol->cluster_size_bits, -1, ctx);
	m = ctx->mrec;
	a = ctx->attr;
	/* A negative return means some clusters could not be freed. */
	if (unlikely(nr_freed < 0)) {
		ntfs_error(vol->sb, "Failed to release cluster(s) (error code "
				"%lli). Unmount and run chkdsk to recover "
				"the lost cluster(s).", (long long)nr_freed);
		NVolSetErrors(vol);
		nr_freed = 0;
	}
	/* Truncate the runlist. */
	err = ntfs_rl_truncate_nolock(vol, &ni->runlist,
			new_alloc_size >> vol->cluster_size_bits);
	/*
	 * If the runlist truncation failed and/or the search context is no
	 * longer valid, we cannot resize the attribute record or build the
	 * mapping pairs array thus we mark the inode bad so that no access to
	 * the freed clusters can happen.
	 */
	if (unlikely(err || IS_ERR(m))) {
		ntfs_error(vol->sb, "Failed to %s (error code %li).%s",
				IS_ERR(m) ?
				"restore attribute search context" :
				"truncate attribute runlist",
				IS_ERR(m) ? PTR_ERR(m) : err, es);
		err = -EIO;
		goto bad_out;
	}
	/* Get the size for the shrunk mapping pairs array for the runlist. */
	mp_size = ntfs_get_size_for_mapping_pairs(vol, ni->runlist.rl, 0, -1);
	if (unlikely(mp_size <= 0)) {
		ntfs_error(vol->sb, "Cannot shrink allocation of inode 0x%lx, "
				"attribute type 0x%x, because determining the "
				"size for the mapping pairs failed with error "
				"code %i.%s", vi->i_ino,
				(unsigned)le32_to_cpu(ni->type), mp_size, es);
		err = -EIO;
		goto bad_out;
	}
	/*
	 * Shrink the attribute record for the new mapping pairs array. Note,
	 * this cannot fail since we are making the attribute smaller thus by
	 * definition there is enough space to do so.
	 */
	attr_len = le32_to_cpu(a->length);
	err = ntfs_attr_record_resize(m, a, mp_size +
			le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
	BUG_ON(err);
	/*
	 * Generate the mapping pairs array directly into the attribute record.
	 */
	err = ntfs_mapping_pairs_build(vol, (u8*)a +
			le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
			mp_size, ni->runlist.rl, 0, -1, NULL);
	if (unlikely(err)) {
		ntfs_error(vol->sb, "Cannot shrink allocation of inode 0x%lx, "
				"attribute type 0x%x, because building the "
				"mapping pairs failed with error code %i.%s",
				vi->i_ino, (unsigned)le32_to_cpu(ni->type),
				err, es);
		err = -EIO;
		goto bad_out;
	}
	/* Update the allocated/compressed size as well as the highest vcn. */
	a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >>
			vol->cluster_size_bits) - 1);
	write_lock_irqsave(&ni->size_lock, flags);
	ni->allocated_size = new_alloc_size;
	a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size);
	if (NInoSparse(ni) || NInoCompressed(ni)) {
		if (nr_freed) {
			ni->itype.compressed.size -= nr_freed <<
					vol->cluster_size_bits;
			BUG_ON(ni->itype.compressed.size < 0);
			a->data.non_resident.compressed_size = cpu_to_sle64(
					ni->itype.compressed.size);
			vi->i_blocks = ni->itype.compressed.size >> 9;
		}
	} else
		vi->i_blocks = new_alloc_size >> 9;
	write_unlock_irqrestore(&ni->size_lock, flags);
	/*
	 * We have shrunk the allocation. If this is a shrinking truncate we
	 * have already dealt with the initialized_size and the data_size above
	 * and we are done. If the truncate is only changing the allocation
	 * and not the data_size, we are also done. If this is an extending
	 * truncate, need to extend the data_size now which is ensured by the
	 * fact that @size_change is positive.
	 */
alloc_done:
	/*
	 * If the size is growing, need to update it now. If it is shrinking,
	 * we have already updated it above (before the allocation change).
	 */
	if (size_change > 0)
		a->data.non_resident.data_size = cpu_to_sle64(new_size);
	/* Ensure the modified mft record is written out. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
unm_done:
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	up_write(&ni->runlist.lock);
done:
	/* Update the mtime and ctime on the base inode. */
	/* normally ->truncate shouldn't update ctime or mtime,
	 * but ntfs did before so it got a copy & paste version
	 * of file_update_time. one day someone should fix this
	 * for real.
	 */
	if (!IS_NOCMTIME(VFS_I(base_ni)) && !IS_RDONLY(VFS_I(base_ni))) {
		struct timespec now = current_fs_time(VFS_I(base_ni)->i_sb);
		int sync_it = 0;

		if (!timespec_equal(&VFS_I(base_ni)->i_mtime, &now) ||
				!timespec_equal(&VFS_I(base_ni)->i_ctime, &now))
			sync_it = 1;
		VFS_I(base_ni)->i_mtime = now;
		VFS_I(base_ni)->i_ctime = now;
		if (sync_it)
			mark_inode_dirty_sync(VFS_I(base_ni));
	}
	if (likely(!err)) {
		NInoClearTruncateFailed(ni);
		ntfs_debug("Done.");
	}
	return err;
	/* Error paths: old_bad_out is used before old_size is known. */
old_bad_out:
	old_size = -1;
bad_out:
	if (err != -ENOMEM && err != -EOPNOTSUPP)
		NVolSetErrors(vol);
	if (err != -EOPNOTSUPP)
		NInoSetTruncateFailed(ni);
	else if (old_size >= 0)
		i_size_write(vi, old_size);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	up_write(&ni->runlist.lock);
out:
	ntfs_debug("Failed. Returning error code %i.", err);
	return err;
	/* Errors from the resident-to-non-resident conversion land here. */
conv_err_out:
	if (err != -ENOMEM && err != -EOPNOTSUPP)
		NVolSetErrors(vol);
	if (err != -EOPNOTSUPP)
		NInoSetTruncateFailed(ni);
	else
		i_size_write(vi, old_size);
	goto out;
}
/**
* ntfs_truncate_vfs - wrapper for ntfs_truncate() that has no return value
* @vi: inode for which the i_size was changed
*
* Wrapper for ntfs_truncate() that has no return value.
*
* See ntfs_truncate() description above for details.
*/
#ifdef NTFS_RW
void ntfs_truncate_vfs(struct inode *vi)
{
	/* Errors are reported/flagged by ntfs_truncate() itself. */
	ntfs_truncate(vi);
}
#endif
/**
* ntfs_setattr - called from notify_change() when an attribute is being changed
* @dentry: dentry whose attributes to change
* @attr: structure describing the attributes and the changes
*
 * We have to trap VFS attempts to truncate the file described by @dentry as
 * soon as possible: resizing is only implemented for normal (uncompressed,
 * unencrypted) files, so all other i_size changes are aborted here.
*
* We also abort all changes of user, group, and mode as we do not implement
* the NTFS ACLs yet.
*
* Called with ->i_mutex held.
*/
int ntfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *vi = dentry->d_inode;
	int err;
	unsigned int ia_valid = attr->ia_valid;

	/* Let the VFS perform the generic permission/sanity checks first. */
	err = inode_change_ok(vi, attr);
	if (err)
		goto out;
	/* We do not support NTFS ACLs yet. */
	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE)) {
		ntfs_warning(vi->i_sb, "Changes in user/group/mode are not "
				"supported yet, ignoring.");
		err = -EOPNOTSUPP;
		goto out;
	}
	if (ia_valid & ATTR_SIZE) {
		if (attr->ia_size != i_size_read(vi)) {
			ntfs_inode *ni = NTFS_I(vi);
			/*
			 * FIXME: For now we do not support resizing of
			 * compressed or encrypted files yet.
			 */
			if (NInoCompressed(ni) || NInoEncrypted(ni)) {
				ntfs_warning(vi->i_sb, "Changes in inode size "
						"are not supported yet for "
						"%s files, ignoring.",
						NInoCompressed(ni) ?
						"compressed" : "encrypted");
				err = -EOPNOTSUPP;
			} else {
				/* Shrink/grow i_size, then do the real work. */
				truncate_setsize(vi, attr->ia_size);
				ntfs_truncate_vfs(vi);
			}
			/*
			 * If the truncate failed, or size was the only change
			 * requested (ntfs_truncate() already updated the
			 * timestamps), we are done.
			 */
			if (err || ia_valid == ATTR_SIZE)
				goto out;
		} else {
			/*
			 * We skipped the truncate but must still update
			 * timestamps.
			 */
			ia_valid |= ATTR_MTIME | ATTR_CTIME;
		}
	}
	/* Apply any requested timestamp changes at superblock granularity. */
	if (ia_valid & ATTR_ATIME)
		vi->i_atime = timespec_trunc(attr->ia_atime,
				vi->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		vi->i_mtime = timespec_trunc(attr->ia_mtime,
				vi->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		vi->i_ctime = timespec_trunc(attr->ia_ctime,
				vi->i_sb->s_time_gran);
	mark_inode_dirty(vi);
out:
	return err;
}
/**
* ntfs_write_inode - write out a dirty inode
* @vi: inode to write out
* @sync: if true, write out synchronously
*
* Write out a dirty inode to disk including any extent inodes if present.
*
* If @sync is true, commit the inode to disk and wait for io completion. This
* is done using write_mft_record().
*
* If @sync is false, just schedule the write to happen but do not wait for i/o
* completion. In 2.6 kernels, scheduling usually happens just by virtue of
* marking the page (and in this case mft record) dirty but we do not implement
* this yet as write_mft_record() largely ignores the @sync parameter and
* always performs synchronous writes.
*
* Return 0 on success and -errno on error.
*/
int __ntfs_write_inode(struct inode *vi, int sync)
{
	sle64 nt;
	ntfs_inode *ni = NTFS_I(vi);
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	STANDARD_INFORMATION *si;
	int err = 0;
	bool modified = false;

	ntfs_debug("Entering for %sinode 0x%lx.", NInoAttr(ni) ? "attr " : "",
			vi->i_ino);
	/*
	 * Dirty attribute inodes are written via their real inodes so just
	 * clean them here. Access time updates are taken care off when the
	 * real inode is written.
	 */
	if (NInoAttr(ni)) {
		NInoClearDirty(ni);
		ntfs_debug("Done.");
		return 0;
	}
	/* Map, pin, and lock the mft record belonging to the inode. */
	m = map_mft_record(ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		goto err_out;
	}
	/* Update the access times in the standard information attribute. */
	ctx = ntfs_attr_get_search_ctx(ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto unm_err_out;
	}
	err = ntfs_attr_lookup(AT_STANDARD_INFORMATION, NULL, 0,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		ntfs_attr_put_search_ctx(ctx);
		goto unm_err_out;
	}
	si = (STANDARD_INFORMATION*)((u8*)ctx->attr +
			le16_to_cpu(ctx->attr->data.resident.value_offset));
	/* Update the access times if they have changed. */
	nt = utc2ntfs(vi->i_mtime);
	if (si->last_data_change_time != nt) {
		ntfs_debug("Updating mtime for inode 0x%lx: old = 0x%llx, "
				"new = 0x%llx", vi->i_ino, (long long)
				sle64_to_cpu(si->last_data_change_time),
				(long long)sle64_to_cpu(nt));
		si->last_data_change_time = nt;
		modified = true;
	}
	nt = utc2ntfs(vi->i_ctime);
	if (si->last_mft_change_time != nt) {
		ntfs_debug("Updating ctime for inode 0x%lx: old = 0x%llx, "
				"new = 0x%llx", vi->i_ino, (long long)
				sle64_to_cpu(si->last_mft_change_time),
				(long long)sle64_to_cpu(nt));
		si->last_mft_change_time = nt;
		modified = true;
	}
	nt = utc2ntfs(vi->i_atime);
	if (si->last_access_time != nt) {
		ntfs_debug("Updating atime for inode 0x%lx: old = 0x%llx, "
				"new = 0x%llx", vi->i_ino,
				(long long)sle64_to_cpu(si->last_access_time),
				(long long)sle64_to_cpu(nt));
		si->last_access_time = nt;
		modified = true;
	}
	/*
	 * If we just modified the standard information attribute we need to
	 * mark the mft record it is in dirty. We do this manually so that
	 * mark_inode_dirty() is not called which would redirty the inode and
	 * hence result in an infinite loop of trying to write the inode.
	 * There is no need to mark the base inode nor the base mft record
	 * dirty, since we are going to write this mft record below in any case
	 * and the base mft record may actually not have been modified so it
	 * might not need to be written out.
	 * NOTE: It is not a problem when the inode for $MFT itself is being
	 * written out as mark_ntfs_record_dirty() will only set I_DIRTY_PAGES
	 * on the $MFT inode and hence ntfs_write_inode() will not be
	 * re-invoked because of it which in turn is ok since the dirtied mft
	 * record will be cleaned and written out to disk below, i.e. before
	 * this function returns.
	 */
	if (modified) {
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		if (!NInoTestSetDirty(ctx->ntfs_ino))
			mark_ntfs_record_dirty(ctx->ntfs_ino->page,
					ctx->ntfs_ino->page_ofs);
	}
	ntfs_attr_put_search_ctx(ctx);
	/* Now the access times are updated, write the base mft record. */
	if (NInoDirty(ni))
		err = write_mft_record(ni, m, sync);
	/* Write all attached extent mft records. */
	mutex_lock(&ni->extent_lock);
	if (ni->nr_extents > 0) {
		ntfs_inode **extent_nis = ni->ext.extent_ntfs_inos;
		int i;

		ntfs_debug("Writing %i extent inodes.", ni->nr_extents);
		for (i = 0; i < ni->nr_extents; i++) {
			ntfs_inode *tni = extent_nis[i];

			if (NInoDirty(tni)) {
				MFT_RECORD *tm = map_mft_record(tni);
				int ret;

				/* Record only the first hard error seen. */
				if (IS_ERR(tm)) {
					if (!err || err == -ENOMEM)
						err = PTR_ERR(tm);
					continue;
				}
				ret = write_mft_record(tni, tm, sync);
				unmap_mft_record(tni);
				if (unlikely(ret)) {
					if (!err || err == -ENOMEM)
						err = ret;
				}
			}
		}
	}
	mutex_unlock(&ni->extent_lock);
	unmap_mft_record(ni);
	if (unlikely(err))
		goto err_out;
	ntfs_debug("Done.");
	return 0;
unm_err_out:
	unmap_mft_record(ni);
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Not enough memory to write inode. "
				"Marking the inode dirty again, so the VFS "
				"retries later.");
		mark_inode_dirty(vi);
	} else {
		ntfs_error(vi->i_sb, "Failed (error %i): Run chkdsk.", -err);
		NVolSetErrors(ni->vol);
	}
	return err;
}
#endif /* NTFS_RW */
| gpl-2.0 |
androidbftab1/bf-kernel-3.18 | sound/soc/intel/sst-mfld-platform-pcm.c | 323 | 17600 | /*
* sst_mfld_platform.c - Intel MID Platform driver
*
* Copyright (C) 2010-2014 Intel Corp
* Author: Vinod Koul <vinod.koul@intel.com>
* Author: Harsha Priya <priya.harsha@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/compress_driver.h>
#include <asm/platform_sst_audio.h>
#include "sst-mfld-platform.h"
#include "sst-atom-controls.h"
/* The single platform-wide SST DSP device; guarded by sst_lock. */
struct sst_device *sst;
static DEFINE_MUTEX(sst_lock);

/* Compressed-stream ops, implemented in the companion compress file. */
extern struct snd_compr_ops sst_platform_compr_ops;
/*
 * Register @dev as the one and only SST DSP device. Takes a module
 * reference on the provider; fails with -EEXIST if a device is already
 * registered.
 */
int sst_register_dsp(struct sst_device *dev)
{
	int ret = 0;

	if (WARN_ON(!dev))
		return -EINVAL;
	if (!try_module_get(dev->dev->driver->owner))
		return -ENODEV;

	mutex_lock(&sst_lock);
	if (sst) {
		dev_err(dev->dev, "we already have a device %s\n", sst->name);
		module_put(dev->dev->driver->owner);
		ret = -EEXIST;
	} else {
		dev_dbg(dev->dev, "registering device %s\n", dev->name);
		sst = dev;
	}
	mutex_unlock(&sst_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_register_dsp);
/*
 * Unregister @dev, dropping the module reference taken at registration.
 * Only the currently registered device may be unregistered.
 */
int sst_unregister_dsp(struct sst_device *dev)
{
	int ret = 0;

	if (WARN_ON(!dev) || dev != sst)
		return -EINVAL;

	mutex_lock(&sst_lock);
	if (!sst) {
		ret = -EIO;
	} else {
		module_put(sst->dev->driver->owner);
		dev_dbg(dev->dev, "unreg %s\n", sst->name);
		sst = NULL;
	}
	mutex_unlock(&sst_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_unregister_dsp);
/* PCM hardware capabilities advertised to ALSA for all SST streams. */
static struct snd_pcm_hardware sst_platform_pcm_hw = {
	.info =	(SNDRV_PCM_INFO_INTERLEAVED |
			SNDRV_PCM_INFO_DOUBLE |
			SNDRV_PCM_INFO_PAUSE |
			SNDRV_PCM_INFO_RESUME |
			SNDRV_PCM_INFO_MMAP|
			SNDRV_PCM_INFO_MMAP_VALID |
			SNDRV_PCM_INFO_BLOCK_TRANSFER |
			SNDRV_PCM_INFO_SYNC_START),
	/* Buffer/period limits come from the SST platform header. */
	.buffer_bytes_max = SST_MAX_BUFFER,
	.period_bytes_min = SST_MIN_PERIOD_BYTES,
	.period_bytes_max = SST_MAX_PERIOD_BYTES,
	.periods_min = SST_MIN_PERIODS,
	.periods_max = SST_MAX_PERIODS,
	.fifo_size = SST_FIFO_SIZE,
};
/*
 * Device/direction -> DSP pipe/task mapping. Index 0 is reserved;
 * sst_get_stream_mapping() scans from index 1.
 */
static struct sst_dev_stream_map dpcm_strm_map[] = {
	{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, /* Reserved, not in use */
	{MERR_DPCM_AUDIO, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA1_IN, SST_TASK_ID_MEDIA, 0},
	{MERR_DPCM_COMPR, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA0_IN, SST_TASK_ID_MEDIA, 0},
	{MERR_DPCM_AUDIO, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_PCM1_OUT, SST_TASK_ID_MEDIA, 0},
};
/* MFLD - MSIC */
/* MFLD - MSIC */
/* CPU DAIs exposed by this platform: one PCM headset DAI, one compress DAI. */
static struct snd_soc_dai_driver sst_platform_dai[] = {
{
	.name = "Headset-cpu-dai",
	.id = 0,
	.playback = {
		.channels_min = SST_STEREO,
		.channels_max = SST_STEREO,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S24_LE,
	},
	.capture = {
		.channels_min = 1,
		/* NOTE(review): 5-channel capture looks intentional (mic array?) — confirm. */
		.channels_max = 5,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S24_LE,
	},
},
{
	.name = "Compress-cpu-dai",
	.compress_dai = 1,
	.playback = {
		.channels_min = SST_STEREO,
		.channels_max = SST_STEREO,
		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
},
};
/* helper functions */
/* Store @state as the stream's status under the status spinlock. */
void sst_set_stream_status(struct sst_runtime_stream *stream,
		int state)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&stream->status_lock, irq_flags);
	stream->stream_status = state;
	spin_unlock_irqrestore(&stream->status_lock, irq_flags);
}
/* Read the stream's current status under the status spinlock. */
static inline int sst_get_stream_status(struct sst_runtime_stream *stream)
{
	unsigned long irq_flags;
	int cur_state;

	spin_lock_irqsave(&stream->status_lock, irq_flags);
	cur_state = stream->stream_status;
	spin_unlock_irqrestore(&stream->status_lock, irq_flags);

	return cur_state;
}
static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
struct snd_sst_alloc_params_ext *alloc_param)
{
unsigned int channels;
snd_pcm_uframes_t period_size;
ssize_t periodbytes;
ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
u32 buffer_addr = virt_to_phys(substream->dma_buffer.area);
channels = substream->runtime->channels;
period_size = substream->runtime->period_size;
periodbytes = samples_to_bytes(substream->runtime, period_size);
alloc_param->ring_buf_info[0].addr = buffer_addr;
alloc_param->ring_buf_info[0].size = buffer_bytes;
alloc_param->sg_count = 1;
alloc_param->reserved = 0;
alloc_param->frag_size = periodbytes * channels;
}
/* Fill @param's PCM parameters from the ALSA runtime configuration. */
static void sst_fill_pcm_params(struct snd_pcm_substream *substream,
	struct snd_sst_stream_params *param)
{
	param->uc.pcm_params.num_chan = (u8) substream->runtime->channels;
	param->uc.pcm_params.pcm_wd_sz = substream->runtime->sample_bits;
	param->uc.pcm_params.sfreq = substream->runtime->rate;

	/* PCM stream via ALSA interface */
	param->uc.pcm_params.use_offload_path = 0;
	param->uc.pcm_params.reserved2 = 0;
	/*
	 * Zero the WHOLE channel map: the previous sizeof(u8) cleared only
	 * channel_map[0] and passed the rest of the array to the DSP as
	 * uninitialized stack garbage.
	 */
	memset(param->uc.pcm_params.channel_map, 0,
			sizeof(param->uc.pcm_params.channel_map));
}
/*
 * Look up the stream-map entry matching @dev and @dir. Returns the index
 * (>= 1) on success, 0 when no entry matches (callers treat any value
 * <= 0 as an error), or -EINVAL when @map is NULL. @sdev is currently
 * unused by the lookup.
 */
static int sst_get_stream_mapping(int dev, int sdev, int dir,
	struct sst_dev_stream_map *map, int size)
{
	int idx;

	if (!map)
		return -EINVAL;

	/* index 0 is not used in stream map */
	for (idx = 1; idx < size; idx++)
		if (map[idx].dev_num == dev && map[idx].direction == dir)
			return idx;

	return 0;
}
/*
 * Resolve @substream (a PCM substream or, when @is_compress is set, a
 * compressed stream) to an SST stream id, device type, task and direction
 * using the platform stream map. Returns 0 on success, -EINVAL when no
 * mapping exists.
 */
int sst_fill_stream_params(void *substream,
	const struct sst_data *ctx, struct snd_sst_params *str_params, bool is_compress)
{
	struct sst_dev_stream_map *map = ctx->pdata->pdev_strm_map;
	int map_size = ctx->pdata->strm_map_size;
	int index;

	str_params->stream_type = SST_STREAM_TYPE_MUSIC;

	if (is_compress) {
		struct snd_compr_stream *cstream = substream;

		index = sst_get_stream_mapping(cstream->device->device,
				0, cstream->direction, map, map_size);
		if (index <= 0)
			return -EINVAL;
		str_params->stream_id = index;
		str_params->device_type = map[index].device_id;
		str_params->task = map[index].task_id;
		str_params->ops = (u8)cstream->direction;
	} else {
		struct snd_pcm_substream *pstream = substream;

		index = sst_get_stream_mapping(pstream->pcm->device,
				pstream->number, pstream->stream,
				map, map_size);
		if (index <= 0)
			return -EINVAL;
		str_params->stream_id = index;
		str_params->device_type = map[index].device_id;
		str_params->task = map[index].task_id;
		str_params->ops = (u8)pstream->stream;
	}
	return 0;
}
/*
 * Allocate an SST stream for @substream: fill the PCM and buffer
 * allocation parameters, resolve the stream mapping and open the stream
 * on the DSP. Returns the (positive) open result on success or a
 * negative error code.
 */
static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
	struct snd_soc_dai *dai)
{
	struct sst_runtime_stream *stream =
			substream->runtime->private_data;
	struct snd_sst_stream_params param = {{{0,},},};
	struct snd_sst_params str_params = {0};
	struct snd_sst_alloc_params_ext alloc_params = {0};
	int ret_val = 0;
	struct sst_data *ctx = snd_soc_dai_get_drvdata(dai);

	/* set codec params and inform SST driver the same */
	sst_fill_pcm_params(substream, &param);
	sst_fill_alloc_params(substream, &alloc_params);
	substream->runtime->dma_area = substream->dma_buffer.area;
	str_params.sparams = param;
	str_params.aparams = alloc_params;
	str_params.codec = SST_CODEC_TYPE_PCM;

	/* fill the device type and stream id to pass to SST driver */
	ret_val = sst_fill_stream_params(substream, ctx, &str_params, false);
	if (ret_val < 0)
		return ret_val;

	stream->stream_info.str_id = str_params.stream_id;
	/*
	 * Pass the open result straight through; the old
	 * "if (ret_val <= 0) return ret_val;" before an identical
	 * "return ret_val;" was dead code.
	 */
	return stream->ops->open(sst->dev, &str_params);
}
/*
 * Firmware callback: a PCM period has completed.  Forward to ALSA only
 * while the stream is still in the RUNNING state.
 */
static void sst_period_elapsed(void *arg)
{
	struct snd_pcm_substream *substream = arg;
	struct sst_runtime_stream *stream;

	if (!substream || !substream->runtime)
		return;

	stream = substream->runtime->private_data;
	if (!stream)
		return;

	if (sst_get_stream_status(stream) != SST_PLATFORM_RUNNING)
		return;

	snd_pcm_period_elapsed(substream);
}
/*
 * Register the period-elapsed callback and the initial stream parameters
 * with the SST firmware driver, then mark the stream INIT.
 */
static int sst_platform_init_stream(struct snd_pcm_substream *substream)
{
struct sst_runtime_stream *stream =
substream->runtime->private_data;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
int ret_val;
dev_dbg(rtd->dev, "setting buffer ptr param\n");
sst_set_stream_status(stream, SST_PLATFORM_INIT);
stream->stream_info.period_elapsed = sst_period_elapsed;
stream->stream_info.arg = substream;
stream->stream_info.buffer_ptr = 0;
stream->stream_info.sfreq = substream->runtime->rate;
ret_val = stream->ops->stream_init(sst->dev, &stream->stream_info);
if (ret_val)
dev_err(rtd->dev, "control_set ret error %d\n", ret_val);
return ret_val;
}
/* Power the SST device up through the registered ops; returns ops status. */
static int power_up_sst(struct sst_runtime_stream *stream)
{
return stream->ops->power(sst->dev, true);
}
/* Power the SST device down; failures are ignored by callers. */
static void power_down_sst(struct sst_runtime_stream *stream)
{
stream->ops->power(sst->dev, false);
}
static int sst_media_open(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
int ret_val = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
struct sst_runtime_stream *stream;
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
if (!stream)
return -ENOMEM;
spin_lock_init(&stream->status_lock);
/* get the sst ops */
mutex_lock(&sst_lock);
if (!sst ||
!try_module_get(sst->dev->driver->owner)) {
dev_err(dai->dev, "no device available to run\n");
ret_val = -ENODEV;
goto out_ops;
}
stream->ops = sst->ops;
mutex_unlock(&sst_lock);
stream->stream_info.str_id = 0;
stream->stream_info.arg = substream;
/* allocate memory for SST API set */
runtime->private_data = stream;
ret_val = power_up_sst(stream);
if (ret_val < 0)
return ret_val;
/* Make sure, that the period size is always even */
snd_pcm_hw_constraint_step(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_PERIODS, 2);
return snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
out_ops:
kfree(stream);
mutex_unlock(&sst_lock);
return ret_val;
}
/*
 * DAI shutdown: power the DSP down, close the firmware stream (a str_id
 * of 0 means no stream was ever allocated), drop the module reference
 * taken in sst_media_open() and free the runtime stream.
 */
static void sst_media_close(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct sst_runtime_stream *stream;
int ret_val = 0, str_id;
stream = substream->runtime->private_data;
power_down_sst(stream);
str_id = stream->stream_info.str_id;
if (str_id)
ret_val = stream->ops->close(sst->dev, str_id);
module_put(sst->dev->driver->owner);
kfree(stream);
}
/*
 * Map the stream's firmware str_id back to the pipe (device) id via the
 * platform stream map.
 * NOTE(review): the local "sst" shadows the file-scope sst pointer used
 * elsewhere in this file — intentional here, but easy to misread.
 */
static inline unsigned int get_current_pipe_id(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream)
{
struct sst_data *sst = snd_soc_dai_get_drvdata(dai);
struct sst_dev_stream_map *map = sst->pdata->pdev_strm_map;
struct sst_runtime_stream *stream =
substream->runtime->private_data;
u32 str_id = stream->stream_info.str_id;
unsigned int pipe_id;
pipe_id = map[str_id].device_id;
dev_dbg(dai->dev, "got pipe_id = %#x for str_id = %d\n",
pipe_id, str_id);
return pipe_id;
}
/*
 * DAI prepare: if a firmware stream already exists (nonzero str_id) just
 * drop pending data; otherwise allocate and initialise a new stream and
 * record its id in the PCM name.  A <= 0 return from the allocation is
 * propagated as-is (the ops open() result).
 */
static int sst_media_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct sst_runtime_stream *stream;
int ret_val = 0, str_id;
stream = substream->runtime->private_data;
str_id = stream->stream_info.str_id;
if (stream->stream_info.str_id) {
ret_val = stream->ops->stream_drop(sst->dev, str_id);
return ret_val;
}
ret_val = sst_platform_alloc_stream(substream, dai);
if (ret_val <= 0)
return ret_val;
/* expose the firmware stream id through the PCM id string */
snprintf(substream->pcm->id, sizeof(substream->pcm->id),
"%d", stream->stream_info.str_id);
ret_val = sst_platform_init_stream(substream);
if (ret_val)
return ret_val;
substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER;
return ret_val;
}
/*
 * DAI hw_params: allocate the preallocated DMA buffer pages and zero them.
 *
 * Fix: the original ignored the return value of snd_pcm_lib_malloc_pages()
 * and then memset a buffer that might not have been allocated.
 */
static int sst_media_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params,
				struct snd_soc_dai *dai)
{
	int ret;

	ret = snd_pcm_lib_malloc_pages(substream,
				       params_buffer_bytes(params));
	if (ret < 0)
		return ret;
	memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
	return 0;
}
/* DAI hw_free: release the pages taken in sst_media_hw_params(). */
static int sst_media_hw_free(struct snd_pcm_substream *substream,
		struct snd_soc_dai *dai)
{
	return snd_pcm_lib_free_pages(substream);
}
/* DAI callbacks wiring the media PCM lifecycle to the SST stream ops. */
static struct snd_soc_dai_ops sst_media_dai_ops = {
.startup = sst_media_open,
.shutdown = sst_media_close,
.prepare = sst_media_prepare,
.hw_params = sst_media_hw_params,
.hw_free = sst_media_hw_free,
};
/*
 * PCM open: publish the hardware capabilities.  Internal (DSP-only) PCMs
 * are never exposed to userspace, so they need no capability setup.
 */
static int sst_platform_open(struct snd_pcm_substream *substream)
{
	if (substream->pcm->internal)
		return 0;

	substream->runtime->hw = sst_platform_pcm_hw;
	return 0;
}
/*
 * PCM trigger: translate ALSA trigger commands into SST stream ops and,
 * on success, record the new platform stream state.
 */
static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream,
int cmd)
{
int ret_val = 0, str_id;
struct sst_runtime_stream *stream;
int status;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
dev_dbg(rtd->dev, "sst_platform_pcm_trigger called\n");
/* internal PCMs are driven by the DSP, nothing to trigger here */
if (substream->pcm->internal)
return 0;
stream = substream->runtime->private_data;
str_id = stream->stream_info.str_id;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
dev_dbg(rtd->dev, "sst: Trigger Start\n");
status = SST_PLATFORM_RUNNING;
stream->stream_info.arg = substream;
ret_val = stream->ops->stream_start(sst->dev, str_id);
break;
case SNDRV_PCM_TRIGGER_STOP:
dev_dbg(rtd->dev, "sst: in stop\n");
status = SST_PLATFORM_DROPPED;
ret_val = stream->ops->stream_drop(sst->dev, str_id);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
dev_dbg(rtd->dev, "sst: in pause\n");
status = SST_PLATFORM_PAUSED;
ret_val = stream->ops->stream_pause(sst->dev, str_id);
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
dev_dbg(rtd->dev, "sst: in pause release\n");
status = SST_PLATFORM_RUNNING;
ret_val = stream->ops->stream_pause_release(sst->dev, str_id);
break;
default:
return -EINVAL;
}
/* only commit the state change if the firmware accepted the command */
if (!ret_val)
sst_set_stream_status(stream, status);
return ret_val;
}
/*
 * PCM pointer: query the firmware timestamp and report the hardware
 * buffer position plus rendering delay.
 * NOTE(review): a negative ret_val is returned through the unsigned
 * snd_pcm_uframes_t type — relies on ALSA treating large values as
 * errors; confirm against the ALSA core before changing.
 */
static snd_pcm_uframes_t sst_platform_pcm_pointer
(struct snd_pcm_substream *substream)
{
struct sst_runtime_stream *stream;
int ret_val, status;
struct pcm_stream_info *str_info;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
stream = substream->runtime->private_data;
status = sst_get_stream_status(stream);
/* no valid position before the stream has been started */
if (status == SST_PLATFORM_INIT)
return 0;
str_info = &stream->stream_info;
ret_val = stream->ops->stream_read_tstamp(sst->dev, str_info);
if (ret_val) {
dev_err(rtd->dev, "sst: error code = %d\n", ret_val);
return ret_val;
}
substream->runtime->delay = str_info->pcm_delay;
return str_info->buffer_ptr;
}
/* ALSA PCM operations for the SST platform. */
static struct snd_pcm_ops sst_platform_ops = {
.open = sst_platform_open,
.ioctl = snd_pcm_lib_ioctl,
.trigger = sst_platform_pcm_trigger,
.pointer = sst_platform_pcm_pointer,
};
/* Free the buffers preallocated in sst_pcm_new(). */
static void sst_pcm_free(struct snd_pcm *pcm)
{
dev_dbg(pcm->dev, "sst_pcm_free called\n");
snd_pcm_lib_preallocate_free_for_all(pcm);
}
/*
 * Preallocate continuous DMA buffers for every substream of a PCM that
 * has at least one playback or capture channel.
 *
 * Fix: repaired the garbled error message ("allocationf fail").
 */
static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_dai *dai = rtd->cpu_dai;
	struct snd_pcm *pcm = rtd->pcm;
	int retval = 0;

	if (dai->driver->playback.channels_min ||
			dai->driver->capture.channels_min) {
		retval = snd_pcm_lib_preallocate_pages_for_all(pcm,
			SNDRV_DMA_TYPE_CONTINUOUS,
			snd_dma_continuous_data(GFP_DMA),
			SST_MIN_BUFFER, SST_MAX_BUFFER);
		if (retval) {
			dev_err(rtd->dev, "dma buffer allocation failed\n");
			return retval;
		}
	}
	return retval;
}
/* Platform probe hook: hand off to the DPCM DSP initialisation. */
static int sst_soc_probe(struct snd_soc_platform *platform)
{
return sst_dsp_init_v2_dpcm(platform);
}
/* ASoC platform driver: PCM + compressed ops and buffer management. */
static struct snd_soc_platform_driver sst_soc_platform_drv = {
.probe = sst_soc_probe,
.ops = &sst_platform_ops,
.compr_ops = &sst_platform_compr_ops,
.pcm_new = sst_pcm_new,
.pcm_free = sst_pcm_free,
};
/* Component under which the CPU DAIs are registered. */
static const struct snd_soc_component_driver sst_component = {
.name = "sst",
};
/*
 * Platform device probe: allocate driver data, install the static DPCM
 * stream map and register the ASoC platform and CPU DAI component.
 * devm allocations are released automatically on failure.
 */
static int sst_platform_probe(struct platform_device *pdev)
{
	struct sst_platform_data *pdata;
	struct sst_data *drv;
	int ret;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->pdev_strm_map = dpcm_strm_map;
	pdata->strm_map_size = ARRAY_SIZE(dpcm_strm_map);
	drv->pdata = pdata;
	mutex_init(&drv->lock);
	dev_set_drvdata(&pdev->dev, drv);

	ret = snd_soc_register_platform(&pdev->dev, &sst_soc_platform_drv);
	if (ret) {
		dev_err(&pdev->dev, "registering soc platform failed\n");
		return ret;
	}

	ret = snd_soc_register_component(&pdev->dev, &sst_component,
					 sst_platform_dai,
					 ARRAY_SIZE(sst_platform_dai));
	if (ret) {
		dev_err(&pdev->dev, "registering cpu dais failed\n");
		snd_soc_unregister_platform(&pdev->dev);
	}
	return ret;
}
/* Unregister the component and platform registered in probe. */
static int sst_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_component(&pdev->dev);
snd_soc_unregister_platform(&pdev->dev);
dev_dbg(&pdev->dev, "sst_platform_remove success\n");
return 0;
}
static struct platform_driver sst_platform_driver = {
.driver = {
.name = "sst-mfld-platform",
.owner = THIS_MODULE,
},
.probe = sst_platform_probe,
.remove = sst_platform_remove,
};
module_platform_driver(sst_platform_driver);
MODULE_DESCRIPTION("ASoC Intel(R) MID Platform driver");
MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sst-mfld-platform");
| gpl-2.0 |
mukulsoni/android_kernel_samsung_ms013g-cm11 | drivers/sensorhub/stm_hestia/factory/gesture_max88920.c | 835 | 4183 | /*
* Copyright (C) 2012, Samsung Electronics Co. Ltd. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "../ssp.h"
#define VENDOR "MAXIM"
#define CHIP_ID "MAX88920"
/*
 * sysfs "vendor" attribute.  (The "gestrue" misspelling is kept: the
 * names are referenced by the DEVICE_ATTR() macros below.)
 */
static ssize_t gestrue_vendor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", VENDOR);
}
/* sysfs "name" attribute: reports the chip id string. */
static ssize_t gestrue_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", CHIP_ID);
}
/* sysfs "raw_data": dump the four most recent gesture channel samples. */
static ssize_t raw_data_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ssp_data *data = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%d,%d,%d,%d\n",
data->buf[GESTURE_SENSOR].data[0],
data->buf[GESTURE_SENSOR].data[1],
data->buf[GESTURE_SENSOR].data[2],
data->buf[GESTURE_SENSOR].data[3]);
}
static ssize_t gesture_get_selftest_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
s16 raw_A = 0, raw_B = 0, raw_C = 0, raw_D = 0;
int iRet = 0;
char chTempBuf[4] = { 0, };
struct ssp_data *data = dev_get_drvdata(dev);
struct ssp_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
msg->cmd = GESTURE_FACTORY;
msg->length = 4;
msg->options = AP2HUB_READ;
msg->buffer = chTempBuf;
msg->free_buffer = 0;
iRet = ssp_spi_sync(data, msg, 2000);
if (iRet != SUCCESS) {
pr_err("[SSP]: %s - Gesture Selftest Timeout!!\n", __func__);
goto exit;
}
raw_A = chTempBuf[0];
raw_B = chTempBuf[1];
raw_C = chTempBuf[2];
raw_D = chTempBuf[3];
pr_info("[SSP] %s: self test A = %d, B = %d, C = %d, D = %d\n",
__func__, raw_A, raw_B, raw_C, raw_D);
exit: return sprintf(buf, "%d,%d,%d,%d\n", raw_A, raw_B, raw_C, raw_D);
}
/* sysfs "ir_current" read: report the cached IR LED current setting. */
static ssize_t ir_current_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ssp_data *data = dev_get_drvdata(dev);
ssp_dbg("[SSP]: %s - Ir_Current Setting = %d\n",
__func__, data->uIr_Current);
return sprintf(buf, "%d\n", data->uIr_Current);
}
/*
 * sysfs "ir_current" write: translate a user-requested current (mA,
 * row 0 of the table) into the register code (row 1), program the hub,
 * then cache the value.
 * NOTE(review): after set_gesture_current() the cached uIr_Current is
 * overwritten with the raw requested value, not the register code that
 * was written — looks intentional (show reports user units) but verify.
 */
static ssize_t ir_current_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
u16 uNewIrCurrent = DEFUALT_IR_CURRENT;
int iRet = 0;
u16 current_index = 0;
struct ssp_data *data = dev_get_drvdata(dev);
static u16 set_current[2][16] = { {0, 25, 50, 75, 100, 125, 150, 175, 225, 250, 275, 300, 325, 350, 375, 400},
{2, 28, 34, 50, 66, 82, 98, 114, 130, 146, 162, 178, 194, 210, 226, 242} };
iRet = kstrtou16(buf, 10, &uNewIrCurrent);
if (iRet < 0)
pr_err("[SSP]: %s - kstrtoint failed.(%d)\n", __func__, iRet);
else {
for(current_index = 0; current_index < 16; current_index++) {
if (set_current[0][current_index] == uNewIrCurrent) {
data->uIr_Current = set_current[1][current_index];
}
}
set_gesture_current(data, data->uIr_Current);
data->uIr_Current= uNewIrCurrent;
}
ssp_dbg("[SSP]: %s - new Ir_Current Setting : %d\n",
__func__, data->uIr_Current);
return size;
}
/* sysfs attribute declarations for the gesture factory node. */
static DEVICE_ATTR(vendor, S_IRUGO, gestrue_vendor_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, gestrue_name_show, NULL);
static DEVICE_ATTR(raw_data, S_IRUGO, raw_data_read, NULL);
static DEVICE_ATTR(selftest, S_IRUGO, gesture_get_selftest_show, NULL);
static DEVICE_ATTR(ir_current, S_IRUGO | S_IWUSR | S_IWGRP,
ir_current_show, ir_current_store);
/* NULL-terminated list passed to sensors_register(). */
static struct device_attribute *gesture_attrs[] = {
&dev_attr_vendor,
&dev_attr_name,
&dev_attr_raw_data,
&dev_attr_selftest,
&dev_attr_ir_current,
NULL,
};
/* Register the "gesture_sensor" factory sysfs node and its attributes. */
void initialize_gesture_factorytest(struct ssp_data *data)
{
sensors_register(data->ges_device, data,
gesture_attrs, "gesture_sensor");
}
/* Tear down the factory sysfs node registered above. */
void remove_gesture_factorytest(struct ssp_data *data)
{
sensors_unregister(data->ges_device, gesture_attrs);
}
| gpl-2.0 |
Team-Blackout/Blackout-M7 | drivers/hwmon/ads7871.c | 1603 | 7064 | /*
* ads7871 - driver for TI ADS7871 A/D converter
*
* Copyright (c) 2010 Paul Thomas <pthomas8589@gmail.com>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 or
* later as publishhed by the Free Software Foundation.
*
* You need to have something like this in struct spi_board_info
* {
* .modalias = "ads7871",
* .max_speed_hz = 2*1000*1000,
* .chip_select = 0,
* .bus_num = 1,
* },
*/
/*From figure 18 in the datasheet*/
/*Register addresses*/
#define REG_LS_BYTE 0 /*A/D Output Data, LS Byte*/
#define REG_MS_BYTE 1 /*A/D Output Data, MS Byte*/
#define REG_PGA_VALID 2 /*PGA Valid Register*/
#define REG_AD_CONTROL 3 /*A/D Control Register*/
#define REG_GAIN_MUX 4 /*Gain/Mux Register*/
#define REG_IO_STATE 5 /*Digital I/O State Register*/
#define REG_IO_CONTROL 6 /*Digital I/O Control Register*/
#define REG_OSC_CONTROL 7 /*Rev/Oscillator Control Register*/
#define REG_SER_CONTROL 24 /*Serial Interface Control Register*/
#define REG_ID 31 /*ID Register*/
/*
* From figure 17 in the datasheet
* These bits get ORed with the address to form
* the instruction byte
*/
/*Instruction Bit masks*/
#define INST_MODE_bm (1<<7)
#define INST_READ_bm (1<<6)
#define INST_16BIT_bm (1<<5)
/*From figure 18 in the datasheet*/
/*bit masks for Rev/Oscillator Control Register*/
#define MUX_CNV_bv 7
#define MUX_CNV_bm (1<<MUX_CNV_bv)
#define MUX_M3_bm (1<<3) /*M3 selects single ended*/
#define MUX_G_bv 4 /*allows for reg = (gain << MUX_G_bv) | ...*/
/*From figure 18 in the datasheet*/
/*bit masks for Rev/Oscillator Control Register*/
#define OSC_OSCR_bm (1<<5)
#define OSC_OSCE_bm (1<<4)
#define OSC_REFE_bm (1<<3)
#define OSC_BUFE_bm (1<<2)
#define OSC_R2V_bm (1<<1)
#define OSC_RBG_bm (1<<0)
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#define DEVICE_NAME "ads7871"
/* Per-device state: the registered hwmon device and an update lock. */
struct ads7871_data {
struct device *hwmon_dev;
struct mutex update_lock;
};
/* Read an 8-bit register; the READ bit is ORed into the instruction. */
static int ads7871_read_reg8(struct spi_device *spi, int reg)
{
	return spi_w8r8(spi, reg | INST_READ_bm);
}
/* Read a 16-bit register (READ and 16BIT instruction bits set). */
static int ads7871_read_reg16(struct spi_device *spi, int reg)
{
	return spi_w8r16(spi, reg | INST_READ_bm | INST_16BIT_bm);
}
/* Write @val to 8-bit register @reg: address byte then data byte. */
static int ads7871_write_reg8(struct spi_device *spi, int reg, u8 val)
{
	u8 cmd[2] = { reg, val };

	return spi_write(spi, cmd, sizeof(cmd));
}
static ssize_t show_voltage(struct device *dev,
struct device_attribute *da, char *buf)
{
struct spi_device *spi = to_spi_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int ret, val, i = 0;
uint8_t channel, mux_cnv;
channel = attr->index;
/*
* TODO: add support for conversions
* other than single ended with a gain of 1
*/
/*MUX_M3_bm forces single ended*/
/*This is also where the gain of the PGA would be set*/
ads7871_write_reg8(spi, REG_GAIN_MUX,
(MUX_CNV_bm | MUX_M3_bm | channel));
ret = ads7871_read_reg8(spi, REG_GAIN_MUX);
mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv);
/*
* on 400MHz arm9 platform the conversion
* is already done when we do this test
*/
while ((i < 2) && mux_cnv) {
i++;
ret = ads7871_read_reg8(spi, REG_GAIN_MUX);
mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv);
msleep_interruptible(1);
}
if (mux_cnv == 0) {
val = ads7871_read_reg16(spi, REG_LS_BYTE);
/*result in volts*10000 = (val/8192)*2.5*10000*/
val = ((val>>2) * 25000) / 8192;
return sprintf(buf, "%d\n", val);
} else {
return -1;
}
}
/* sysfs "name": report the SPI modalias as the hwmon chip name. */
static ssize_t ads7871_show_name(struct device *dev,
struct device_attribute *devattr, char *buf)
{
return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
}
/* One read-only voltage input per ADC channel (index = channel). */
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2);
static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_voltage, NULL, 3);
static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_voltage, NULL, 4);
static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5);
static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6);
static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7);
static DEVICE_ATTR(name, S_IRUGO, ads7871_show_name, NULL);
/* Attribute group created in probe and removed in remove/error paths. */
static struct attribute *ads7871_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_in7_input.dev_attr.attr,
&dev_attr_name.attr,
NULL
};
static const struct attribute_group ads7871_group = {
.attrs = ads7871_attributes,
};
/*
 * Probe: configure the SPI link, reset the serial/control registers,
 * program the oscillator register and read it back as a presence check
 * (SPI has no other error detection), then create the sysfs group and
 * register the hwmon device.  Errors unwind in reverse order.
 */
static int __devinit ads7871_probe(struct spi_device *spi)
{
int ret, err;
uint8_t val;
struct ads7871_data *pdata;
dev_dbg(&spi->dev, "probe\n");
/* Configure the SPI bus */
spi->mode = (SPI_MODE_0);
spi->bits_per_word = 8;
spi_setup(spi);
ads7871_write_reg8(spi, REG_SER_CONTROL, 0);
ads7871_write_reg8(spi, REG_AD_CONTROL, 0);
val = (OSC_OSCR_bm | OSC_OSCE_bm | OSC_REFE_bm | OSC_BUFE_bm);
ads7871_write_reg8(spi, REG_OSC_CONTROL, val);
ret = ads7871_read_reg8(spi, REG_OSC_CONTROL);
dev_dbg(&spi->dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret);
/*
* because there is no other error checking on an SPI bus
* we need to make sure we really have a chip
*/
if (val != ret) {
err = -ENODEV;
goto exit;
}
pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
if (!pdata) {
err = -ENOMEM;
goto exit;
}
err = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
if (err < 0)
goto error_free;
spi_set_drvdata(spi, pdata);
pdata->hwmon_dev = hwmon_device_register(&spi->dev);
if (IS_ERR(pdata->hwmon_dev)) {
err = PTR_ERR(pdata->hwmon_dev);
goto error_remove;
}
return 0;
error_remove:
sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
error_free:
kfree(pdata);
exit:
return err;
}
/* Remove: unregister hwmon, drop the sysfs group and free device data. */
static int __devexit ads7871_remove(struct spi_device *spi)
{
struct ads7871_data *pdata = spi_get_drvdata(spi);
hwmon_device_unregister(pdata->hwmon_dev);
sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
kfree(pdata);
return 0;
}
static struct spi_driver ads7871_driver = {
.driver = {
.name = DEVICE_NAME,
.owner = THIS_MODULE,
},
.probe = ads7871_probe,
.remove = __devexit_p(ads7871_remove),
};
module_spi_driver(ads7871_driver);
MODULE_AUTHOR("Paul Thomas <pthomas8589@gmail.com>");
MODULE_DESCRIPTION("TI ADS7871 A/D driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
zeroblade1984/msm8939-yureka-kk | arch/s390/pci/pci_dma.c | 1859 | 12616 | /*
* Copyright IBM Corp. 2012
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>
/* Slab caches for region (segment) tables and page tables. */
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
/* Allocate a region table with every entry marked invalid + protected. */
static unsigned long *dma_alloc_cpu_table(void)
{
unsigned long *table, *entry;
table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
if (!table)
return NULL;
for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
return table;
}
static void dma_free_cpu_table(void *table)
{
kmem_cache_free(dma_region_table_cache, table);
}
/* Allocate a page table with every entry marked invalid + protected. */
static unsigned long *dma_alloc_page_table(void)
{
unsigned long *table, *entry;
table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
if (!table)
return NULL;
for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
return table;
}
static void dma_free_page_table(void *table)
{
kmem_cache_free(dma_page_table_cache, table);
}
/*
 * Return the segment table a region-table entry points to, allocating and
 * installing a fresh one if the entry is not yet valid.
 */
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
unsigned long *sto;
if (reg_entry_isvalid(*entry))
sto = get_rt_sto(*entry);
else {
sto = dma_alloc_cpu_table();
if (!sto)
return NULL;
set_rt_sto(entry, sto);
validate_rt_entry(entry);
entry_clr_protected(entry);
}
return sto;
}
/*
 * Same as above one level down: return (or lazily create) the page table
 * behind a segment-table entry.
 */
static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
unsigned long *pto;
if (reg_entry_isvalid(*entry))
pto = get_st_pto(*entry);
else {
pto = dma_alloc_page_table();
if (!pto)
return NULL;
set_st_pto(entry, pto);
validate_st_entry(entry);
entry_clr_protected(entry);
}
return pto;
}
/*
 * Walk (and lazily populate) the 3-level translation table for @dma_addr
 * and return a pointer to its page-table entry, or NULL on allocation
 * failure.
 */
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
unsigned long *sto, *pto;
unsigned int rtx, sx, px;
rtx = calc_rtx(dma_addr);
sto = dma_get_seg_table_origin(&rto[rtx]);
if (!sto)
return NULL;
sx = calc_sx(dma_addr);
pto = dma_get_page_table_origin(&sto[sx]);
if (!pto)
return NULL;
px = calc_px(dma_addr);
return &pto[px];
}
/*
 * Update the page-table entry for one DMA page: either invalidate it, or
 * install @page_addr and apply the protection bit from @flags.
 */
static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
				 dma_addr_t dma_addr, int flags)
{
	unsigned long *entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);

	if (!entry) {
		WARN_ON_ONCE(1);
		return;
	}

	/* an invalidation leaves the protection bits untouched */
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
		return;
	}

	set_pt_pfaa(entry, page_addr);
	validate_pt_entry(entry);

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}
/*
 * Map or unmap a physically contiguous range [pa, pa+size) at @dma_addr,
 * one page at a time, under the dma_table lock, then issue an RPCIT
 * refresh when the hardware requires it.
 */
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
dma_addr_t dma_addr, size_t size, int flags)
{
unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
u8 *page_addr = (u8 *) (pa & PAGE_MASK);
dma_addr_t start_dma_addr = dma_addr;
unsigned long irq_flags;
int i, rc = 0;
if (!nr_pages)
return -EINVAL;
spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
if (!zdev->dma_table) {
dev_err(&zdev->pdev->dev, "Missing DMA table\n");
goto no_refresh;
}
for (i = 0; i < nr_pages; i++) {
dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
page_addr += PAGE_SIZE;
dma_addr += PAGE_SIZE;
}
/*
* rpcit is not required to establish new translations when previously
* invalid translation-table entries are validated, however it is
* required when altering previously valid entries.
*/
if (!zdev->tlb_refresh &&
((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
/*
* TODO: also need to check that the old entry is indeed INVALID
* and not only for one page but for the whole range...
* -> now we WARN_ON in that case but with lazy unmap that
* needs to be redone!
*/
goto no_refresh;
rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
nr_pages * PAGE_SIZE);
no_refresh:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
return rc;
}
/* Free every valid page table below a segment table, then the table. */
static void dma_free_seg_table(unsigned long entry)
{
unsigned long *sto = get_rt_sto(entry);
int sx;
for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
if (reg_entry_isvalid(sto[sx]))
dma_free_page_table(get_st_pto(sto[sx]));
dma_free_cpu_table(sto);
}
/* Tear down the whole 3-level translation table of @zdev. */
static void dma_cleanup_tables(struct zpci_dev *zdev)
{
unsigned long *table;
int rtx;
if (!zdev || !zdev->dma_table)
return;
table = zdev->dma_table;
for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
if (reg_entry_isvalid(table[rtx]))
dma_free_seg_table(table[rtx]);
dma_free_cpu_table(table);
zdev->dma_table = NULL;
}
/* Find @size free IOMMU pages in the bitmap, starting at @start. */
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
int size)
{
unsigned long boundary_size = 0x1000000;
return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
start, size, 0, boundary_size, 0);
}
/*
 * Allocate @size IOMMU pages using a rotating next-fit cursor; wraps to
 * the beginning once before giving up.  Returns the offset or -1.
 */
static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
unsigned long offset, flags;
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
if (offset == -1)
offset = __dma_alloc_iommu(zdev, 0, size);
if (offset != -1) {
zdev->next_bit = offset + size;
if (zdev->next_bit >= zdev->iommu_pages)
zdev->next_bit = 0;
}
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
return offset;
}
/* Release @size pages at @offset; also advances the next-fit cursor. */
static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
unsigned long flags;
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
if (!zdev->iommu_bitmap)
goto out;
bitmap_clear(zdev->iommu_bitmap, offset, size);
if (offset >= zdev->next_bit)
zdev->next_bit = offset + size;
out:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}
/* Arch implementation of dma_set_mask(): validate and store the mask. */
int dma_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);
/*
 * dma_map_ops .map_page: reserve IOMMU pages, establish the translation
 * (read-only directions get the PROTECTED bit) and return the DMA address
 * plus the sub-page offset.  Returns DMA_ERROR_CODE on failure.
 */
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
unsigned long nr_pages, iommu_page_index;
unsigned long pa = page_to_phys(page) + offset;
int flags = ZPCI_PTE_VALID;
dma_addr_t dma_addr;
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
if (iommu_page_index == -1)
goto out_err;
/* Use rounded up size */
size = nr_pages * PAGE_SIZE;
dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
if (dma_addr + size > zdev->end_dma) {
dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
dma_addr, size, zdev->end_dma);
goto out_free;
}
if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
flags |= ZPCI_TABLE_PROTECTED;
if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
return dma_addr + (offset & ~PAGE_MASK);
}
out_free:
dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
dev_err(dev, "Failed to map addr: %lx\n", pa);
return DMA_ERROR_CODE;
}
/*
 * dma_map_ops .unmap_page: invalidate the translation for the mapped
 * range and return its IOMMU pages to the bitmap.
 */
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
unsigned long iommu_page_index;
int npages;
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr = dma_addr & PAGE_MASK;
if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);
atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
dma_free_iommu(zdev, iommu_page_index, npages);
}
/*
 * dma_map_ops .alloc: allocate zeroed, page-aligned memory and map it
 * bidirectionally; frees the pages again if the mapping fails.
 */
static void *s390_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
struct page *page;
unsigned long pa;
dma_addr_t map;
size = PAGE_ALIGN(size);
page = alloc_pages(flag, get_order(size));
if (!page)
return NULL;
atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages);
pa = page_to_phys(page);
memset((void *) pa, 0, size);
map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
size, DMA_BIDIRECTIONAL, NULL);
if (dma_mapping_error(dev, map)) {
free_pages(pa, get_order(size));
return NULL;
}
if (dma_handle)
*dma_handle = map;
return (void *) pa;
}
/* dma_map_ops .free: undo s390_dma_alloc(). */
static void s390_dma_free(struct device *dev, size_t size,
void *pa, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long) pa, get_order(size));
}
/*
 * dma_map_ops .map_sg: map each scatterlist element individually; on
 * failure unmap everything mapped so far and return 0.
 */
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nr_elements, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
int mapped_elements = 0;
struct scatterlist *s;
int i;
for_each_sg(sg, s, nr_elements, i) {
struct page *page = sg_page(s);
s->dma_address = s390_dma_map_pages(dev, page, s->offset,
s->length, dir, NULL);
if (!dma_mapping_error(dev, s->dma_address)) {
s->dma_length = s->length;
mapped_elements++;
} else
goto unmap;
}
out:
return mapped_elements;
unmap:
/* roll back the partial mapping */
for_each_sg(sg, s, mapped_elements, i) {
if (s->dma_address)
s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
dir, NULL);
s->dma_address = 0;
s->dma_length = 0;
}
mapped_elements = 0;
goto out;
}
/* dma_map_ops .unmap_sg: unmap every element of the list. */
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nr_elements, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nr_elements, i) {
s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
s->dma_address = 0;
s->dma_length = 0;
}
}
/*
 * Set up DMA translation for one zPCI device: allocate the root table
 * and the IOMMU allocation bitmap (sized to cover all of memory), then
 * register the I/O address translation with the hardware.
 */
int zpci_dma_init_device(struct zpci_dev *zdev)
{
unsigned int bitmap_order;
int rc;
spin_lock_init(&zdev->iommu_bitmap_lock);
spin_lock_init(&zdev->dma_table_lock);
zdev->dma_table = dma_alloc_cpu_table();
if (!zdev->dma_table) {
rc = -ENOMEM;
goto out_clean;
}
zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
bitmap_order = get_order(zdev->iommu_pages / 8);
pr_info("iommu_size: 0x%lx iommu_pages: 0x%lx bitmap_order: %i\n",
zdev->iommu_size, zdev->iommu_pages, bitmap_order);
zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
bitmap_order);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
goto out_reg;
}
rc = zpci_register_ioat(zdev,
0,
zdev->start_dma + PAGE_OFFSET,
zdev->start_dma + zdev->iommu_size - 1,
(u64) zdev->dma_table);
if (rc)
goto out_reg;
return 0;
out_reg:
dma_free_cpu_table(zdev->dma_table);
out_clean:
return rc;
}
/* Undo zpci_dma_init_device(): unregister IOAT and free all resources. */
void zpci_dma_exit_device(struct zpci_dev *zdev)
{
zpci_unregister_ioat(zdev, 0);
dma_cleanup_tables(zdev);
free_pages((unsigned long) zdev->iommu_bitmap,
get_order(zdev->iommu_pages / 8));
zdev->iommu_bitmap = NULL;
zdev->next_bit = 0;
}
/*
 * dma_alloc_cpu_table_caches - create the slab caches for the DMA
 * region tables and page tables.  Returns 0 on success or -ENOMEM;
 * on failure nothing is left allocated.
 */
static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
						   ZPCI_TABLE_SIZE,
						   ZPCI_TABLE_ALIGN, 0, NULL);
	if (!dma_region_table_cache)
		goto fail_region;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
						 ZPCI_PT_SIZE,
						 ZPCI_PT_ALIGN, 0, NULL);
	if (!dma_page_table_cache)
		goto fail_page;

	return 0;

fail_page:
	kmem_cache_destroy(dma_region_table_cache);
fail_region:
	return -ENOMEM;
}
/* Subsystem init: create the translation-table slab caches. */
int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}
/* Subsystem teardown: destroy the caches created by zpci_dma_init(). */
void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}
/* Number of preallocated DMA-debug tracking entries. */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

/* Initialize the generic DMA debugging facility early in boot. */
static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
/*
 * DMA mapping operations for s390 PCI devices: coherent allocation and
 * page/scatterlist mapping all go through the zPCI IOMMU helpers above.
 */
struct dma_map_ops s390_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);
| gpl-2.0 |
ShinySide/SM-A700F | arch/arm/mach-exynos/dev-audio.c | 2115 | 6177 | /* linux/arch/arm/mach-exynos4/dev-audio.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Copyright (c) 2010 Samsung Electronics Co. Ltd
* Jaswinder Singh <jassi.brar@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/platform_data/asoc-s3c.h>
#include <plat/gpio-cfg.h>
#include <mach/map.h>
#include <mach/dma.h>
#include <mach/irqs.h>
/* Internal audio-subsystem SRAM base, used for secondary-DAI idma. */
#define EXYNOS4_AUDSS_INT_MEM	(0x03000000)

/*
 * exynos4_cfg_i2s - route the I2S signals of the given controller to
 * their GPIO banks (special function mode).  Returns 0 on success or
 * -EINVAL for an unknown controller id.
 */
static int exynos4_cfg_i2s(struct platform_device *pdev)
{
	/* configure GPIO for i2s port */
	if (pdev->id == 0) {
		s3c_gpio_cfgpin_range(EXYNOS4_GPZ(0), 7, S3C_GPIO_SFN(2));
	} else if (pdev->id == 1) {
		s3c_gpio_cfgpin_range(EXYNOS4_GPC0(0), 5, S3C_GPIO_SFN(2));
	} else if (pdev->id == 2) {
		s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 5, S3C_GPIO_SFN(4));
	} else {
		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
		return -EINVAL;
	}
	return 0;
}
/* I2Sv5 (primary, 6-channel) platform data; the secondary DAI does
 * idma transfers out of the audio-subsystem internal SRAM. */
static struct s3c_audio_pdata i2sv5_pdata = {
	.cfg_gpio = exynos4_cfg_i2s,
	.type = {
		.i2s = {
			.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
					| QUIRK_NEED_RSTCLR,
			.idma_addr = EXYNOS4_AUDSS_INT_MEM,
		},
	},
};

/* Register window and DMA request lines for I2S controller 0. */
static struct resource exynos4_i2s0_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_I2S0, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_I2S0_TX),
	[2] = DEFINE_RES_DMA(DMACH_I2S0_RX),
	[3] = DEFINE_RES_DMA(DMACH_I2S0S_TX),
};

struct platform_device exynos4_device_i2s0 = {
	.name = "samsung-i2s",
	.id = 0,
	.num_resources = ARRAY_SIZE(exynos4_i2s0_resource),
	.resource = exynos4_i2s0_resource,
	.dev = {
		.platform_data = &i2sv5_pdata,
	},
};
/* I2Sv3 platform data shared by controllers 1 and 2 (no PSR mux). */
static struct s3c_audio_pdata i2sv3_pdata = {
	.cfg_gpio = exynos4_cfg_i2s,
	.type = {
		.i2s = {
			.quirks = QUIRK_NO_MUXPSR,
		},
	},
};

/* Register window and DMA request lines for I2S controller 1. */
static struct resource exynos4_i2s1_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_I2S1, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_I2S1_TX),
	[2] = DEFINE_RES_DMA(DMACH_I2S1_RX),
};

struct platform_device exynos4_device_i2s1 = {
	.name = "samsung-i2s",
	.id = 1,
	.num_resources = ARRAY_SIZE(exynos4_i2s1_resource),
	.resource = exynos4_i2s1_resource,
	.dev = {
		.platform_data = &i2sv3_pdata,
	},
};

/* Register window and DMA request lines for I2S controller 2. */
static struct resource exynos4_i2s2_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_I2S2, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_I2S2_TX),
	[2] = DEFINE_RES_DMA(DMACH_I2S2_RX),
};

struct platform_device exynos4_device_i2s2 = {
	.name = "samsung-i2s",
	.id = 2,
	.num_resources = ARRAY_SIZE(exynos4_i2s2_resource),
	.resource = exynos4_i2s2_resource,
	.dev = {
		.platform_data = &i2sv3_pdata,
	},
};
/* PCM Controller platform_devices */
static int exynos4_pcm_cfg_gpio(struct platform_device *pdev)
{
switch (pdev->id) {
case 0:
s3c_gpio_cfgpin_range(EXYNOS4_GPZ(0), 5, S3C_GPIO_SFN(3));
break;
case 1:
s3c_gpio_cfgpin_range(EXYNOS4_GPC0(0), 5, S3C_GPIO_SFN(3));
break;
case 2:
s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 5, S3C_GPIO_SFN(3));
break;
default:
printk(KERN_DEBUG "Invalid PCM Controller number!");
return -EINVAL;
}
return 0;
}
/* Platform data shared by all three PCM controllers. */
static struct s3c_audio_pdata s3c_pcm_pdata = {
	.cfg_gpio = exynos4_pcm_cfg_gpio,
};

/* Register window and DMA request lines for PCM controller 0. */
static struct resource exynos4_pcm0_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_PCM0, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_PCM0_TX),
	[2] = DEFINE_RES_DMA(DMACH_PCM0_RX),
};

struct platform_device exynos4_device_pcm0 = {
	.name = "samsung-pcm",
	.id = 0,
	.num_resources = ARRAY_SIZE(exynos4_pcm0_resource),
	.resource = exynos4_pcm0_resource,
	.dev = {
		.platform_data = &s3c_pcm_pdata,
	},
};

/* Register window and DMA request lines for PCM controller 1. */
static struct resource exynos4_pcm1_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_PCM1, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_PCM1_TX),
	[2] = DEFINE_RES_DMA(DMACH_PCM1_RX),
};

struct platform_device exynos4_device_pcm1 = {
	.name = "samsung-pcm",
	.id = 1,
	.num_resources = ARRAY_SIZE(exynos4_pcm1_resource),
	.resource = exynos4_pcm1_resource,
	.dev = {
		.platform_data = &s3c_pcm_pdata,
	},
};

/* Register window and DMA request lines for PCM controller 2. */
static struct resource exynos4_pcm2_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_PCM2, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_PCM2_TX),
	[2] = DEFINE_RES_DMA(DMACH_PCM2_RX),
};

struct platform_device exynos4_device_pcm2 = {
	.name = "samsung-pcm",
	.id = 2,
	.num_resources = ARRAY_SIZE(exynos4_pcm2_resource),
	.resource = exynos4_pcm2_resource,
	.dev = {
		.platform_data = &s3c_pcm_pdata,
	},
};
/* AC97 Controller platform devices */

/* Route the AC97 signals on GPC0 to special function 4. */
static int exynos4_ac97_cfg_gpio(struct platform_device *pdev)
{
	return s3c_gpio_cfgpin_range(EXYNOS4_GPC0(0), 5, S3C_GPIO_SFN(4));
}

/* Register window, PCM out/in + mic DMA channels and IRQ for AC97. */
static struct resource exynos4_ac97_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_AC97, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_AC97_PCMOUT),
	[2] = DEFINE_RES_DMA(DMACH_AC97_PCMIN),
	[3] = DEFINE_RES_DMA(DMACH_AC97_MICIN),
	[4] = DEFINE_RES_IRQ(EXYNOS4_IRQ_AC97),
};

static struct s3c_audio_pdata s3c_ac97_pdata = {
	.cfg_gpio = exynos4_ac97_cfg_gpio,
};

/* AC97 DMA is limited to 32-bit addresses. */
static u64 exynos4_ac97_dmamask = DMA_BIT_MASK(32);

struct platform_device exynos4_device_ac97 = {
	.name = "samsung-ac97",
	.id = -1,
	.num_resources = ARRAY_SIZE(exynos4_ac97_resource),
	.resource = exynos4_ac97_resource,
	.dev = {
		.platform_data = &s3c_ac97_pdata,
		.dma_mask = &exynos4_ac97_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};
/* S/PDIF Controller platform_device */

/* Route the two S/PDIF signals on GPC1 to special function 4. */
static int exynos4_spdif_cfg_gpio(struct platform_device *pdev)
{
	s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(4));
	return 0;
}

/* Register window and the single (TX) DMA channel for S/PDIF. */
static struct resource exynos4_spdif_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_SPDIF, SZ_256),
	[1] = DEFINE_RES_DMA(DMACH_SPDIF),
};

static struct s3c_audio_pdata samsung_spdif_pdata = {
	.cfg_gpio = exynos4_spdif_cfg_gpio,
};

/* S/PDIF DMA is limited to 32-bit addresses. */
static u64 exynos4_spdif_dmamask = DMA_BIT_MASK(32);

struct platform_device exynos4_device_spdif = {
	.name = "samsung-spdif",
	.id = -1,
	.num_resources = ARRAY_SIZE(exynos4_spdif_resource),
	.resource = exynos4_spdif_resource,
	.dev = {
		.platform_data = &samsung_spdif_pdata,
		.dma_mask = &exynos4_spdif_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};
| gpl-2.0 |
qtekfun/htcDesire820Kernel | kernel/drivers/net/ethernet/ti/cpsw_ale.c | 2371 | 17526 | /*
* Texas Instruments 3-Port Ethernet Switch Address Lookup Engine
*
* Copyright (C) 2012 Texas Instruments
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include <linux/etherdevice.h>
#include "cpsw_ale.h"
#define BITMASK(bits)		(BIT(bits) - 1)

/* An ALE table entry is 68 bits wide, stored as three 32-bit words. */
#define ALE_ENTRY_BITS		68
#define ALE_ENTRY_WORDS	DIV_ROUND_UP(ALE_ENTRY_BITS, 32)

#define ALE_VERSION_MAJOR(rev)	((rev >> 8) & 0xff)
#define ALE_VERSION_MINOR(rev)	(rev & 0xff)

/* ALE Registers */
#define ALE_IDVER		0x00
#define ALE_CONTROL		0x08
#define ALE_PRESCALE		0x10
#define ALE_UNKNOWNVLAN		0x18
#define ALE_TABLE_CONTROL	0x20
#define ALE_TABLE		0x34
#define ALE_PORTCTL		0x40

/* Set in ALE_TABLE_CONTROL to commit a table write at that index. */
#define ALE_TABLE_WRITE		BIT(31)

/* Entry types (entry_type field). */
#define ALE_TYPE_FREE			0
#define ALE_TYPE_ADDR			1
#define ALE_TYPE_VLAN			2
#define ALE_TYPE_VLAN_ADDR		3

/* Unicast entry sub-types (ucast_type field). */
#define ALE_UCAST_PERSISTANT		0
#define ALE_UCAST_UNTOUCHED		1
#define ALE_UCAST_OUI			2
#define ALE_UCAST_TOUCHED		3
/*
 * cpsw_ale_get_field - extract a bitfield from a 68-bit ALE entry.
 * The entry is kept as three 32-bit words with word 0 holding the
 * highest bits, hence the "2 - word" flip.
 */
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
	int word = 2 - start / 32;	/* words are stored high-to-low */

	return (ale_entry[word] >> (start % 32)) & BITMASK(bits);
}
/*
 * cpsw_ale_set_field - write a bitfield into a 68-bit ALE entry,
 * replacing whatever was stored there before.
 */
static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
				      u32 value)
{
	int word = 2 - start / 32;	/* words are stored high-to-low */
	u32 shift = start % 32;

	value &= BITMASK(bits);
	ale_entry[word] = (ale_entry[word] & ~(BITMASK(bits) << shift)) |
			  (value << shift);
}
/*
 * DEFINE_ALE_FIELD - generate get/set helpers for one named bitfield
 * of an ALE table entry (bit position and width per the CPSW TRM).
 */
#define DEFINE_ALE_FIELD(name, start, bits)				\
static inline int cpsw_ale_get_##name(u32 *ale_entry)			\
{									\
	return cpsw_ale_get_field(ale_entry, start, bits);		\
}									\
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value)	\
{									\
	cpsw_ale_set_field(ale_entry, start, bits, value);		\
}

DEFINE_ALE_FIELD(entry_type,		60,	2)
DEFINE_ALE_FIELD(vlan_id,		48,	12)
DEFINE_ALE_FIELD(mcast_state,		62,	2)
DEFINE_ALE_FIELD(port_mask,		66,	3)
DEFINE_ALE_FIELD(super,			65,	1)
DEFINE_ALE_FIELD(ucast_type,		62,	2)
DEFINE_ALE_FIELD(port_num,		66,	2)
DEFINE_ALE_FIELD(blocked,		65,	1)
DEFINE_ALE_FIELD(secure,		64,	1)
DEFINE_ALE_FIELD(vlan_untag_force,	24,	3)
DEFINE_ALE_FIELD(vlan_reg_mcast,	16,	3)
DEFINE_ALE_FIELD(vlan_unreg_mcast,	8,	3)
DEFINE_ALE_FIELD(vlan_member_list,	0,	3)
DEFINE_ALE_FIELD(mcast,			40,	1)
/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
	int byte;

	/* addr[0] is the most significant byte, stored at bits 40..47 */
	for (byte = 0; byte < 6; byte++)
		addr[byte] = cpsw_ale_get_field(ale_entry, 40 - 8 * byte, 8);
}
/* Store a MAC address into an ALE entry, addr[0] in the top byte. */
static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
{
	int byte;

	for (byte = 0; byte < 6; byte++)
		cpsw_ale_set_field(ale_entry, 40 - 8 * byte, 8, addr[byte]);
}
/*
 * cpsw_ale_read - fetch ALE table entry @idx into @ale_entry
 * (ALE_ENTRY_WORDS words).  Returns @idx for convenience.
 */
static int cpsw_ale_read(struct cpsw_ale *ale, int idx, u32 *ale_entry)
{
	int i;

	/* valid indices are 0 .. ale_entries - 1, so warn on >= (was >) */
	WARN_ON(idx >= ale->params.ale_entries);

	__raw_writel(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		ale_entry[i] = __raw_readl(ale->params.ale_regs +
					   ALE_TABLE + 4 * i);

	return idx;
}
/*
 * cpsw_ale_write - store @ale_entry (ALE_ENTRY_WORDS words) into ALE
 * table slot @idx.  Returns @idx for convenience.
 */
static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
{
	int i;

	/* valid indices are 0 .. ale_entries - 1, so warn on >= (was >) */
	WARN_ON(idx >= ale->params.ale_entries);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		__raw_writel(ale_entry[i], ale->params.ale_regs +
			     ALE_TABLE + 4 * i);

	/* committing the index with ALE_TABLE_WRITE triggers the store */
	__raw_writel(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
		     ALE_TABLE_CONTROL);

	return idx;
}
/*
 * cpsw_ale_match_addr - find the table index of the address entry for
 * @addr (VLAN-qualified by @vid).  Returns the index or -ENOENT.
 * NOTE(review): for plain ALE_TYPE_ADDR entries the vlan_id field is
 * compared against @vid as well — presumably those bits are zero in
 * non-VLAN entries; confirm against the TRM.
 */
int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < ale->params.ale_entries; idx++) {
		u8 entry_addr[6];

		cpsw_ale_read(ale, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		if (cpsw_ale_get_vlan_id(ale_entry) != vid)
			continue;
		cpsw_ale_get_addr(ale_entry, entry_addr);
		if (memcmp(entry_addr, addr, 6) == 0)
			return idx;
	}
	return -ENOENT;
}
/*
 * cpsw_ale_match_vlan - find the table index of the VLAN entry for
 * @vid.  Returns the index or -ENOENT when no such entry exists.
 */
int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
{
	u32 entry[ALE_ENTRY_WORDS];
	int idx;

	for (idx = 0; idx < ale->params.ale_entries; idx++) {
		cpsw_ale_read(ale, idx, entry);
		if (cpsw_ale_get_entry_type(entry) != ALE_TYPE_VLAN)
			continue;
		if (cpsw_ale_get_vlan_id(entry) == vid)
			return idx;
	}
	return -ENOENT;
}
/* Find the first unused slot in the ALE table, or -ENOENT if full. */
static int cpsw_ale_match_free(struct cpsw_ale *ale)
{
	u32 entry[ALE_ENTRY_WORDS];
	int idx;

	for (idx = 0; idx < ale->params.ale_entries; idx++) {
		cpsw_ale_read(ale, idx, entry);
		if (cpsw_ale_get_entry_type(entry) == ALE_TYPE_FREE)
			return idx;
	}
	return -ENOENT;
}
/*
 * cpsw_ale_find_ageable - find a unicast address entry that may be
 * recycled: neither persistent nor OUI-typed.  Multicast entries are
 * never considered.  Returns the index or -ENOENT.
 */
static int cpsw_ale_find_ageable(struct cpsw_ale *ale)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < ale->params.ale_entries; idx++) {
		cpsw_ale_read(ale, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		if (cpsw_ale_get_mcast(ale_entry))
			continue;
		type = cpsw_ale_get_ucast_type(ale_entry);
		if (type != ALE_UCAST_PERSISTANT &&
		    type != ALE_UCAST_OUI)
			return idx;
	}
	return -ENOENT;
}
/*
 * cpsw_ale_flush_mcast - remove the ports in @port_mask from a
 * multicast entry's port list; free the entry when no port remains.
 * @ale is unused here but kept for symmetry with the ucast variant.
 */
static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
				 int port_mask)
{
	int mask;

	mask = cpsw_ale_get_port_mask(ale_entry);
	if ((mask & port_mask) == 0)
		return;	/* ports dont intersect, not interested */

	mask &= ~port_mask;

	/* free the entry when no ports are left in its mask */
	if (mask)
		cpsw_ale_set_port_mask(ale_entry, mask);
	else
		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
}
/*
 * cpsw_ale_flush_multicast - strip the ports in @port_mask from every
 * multicast entry except broadcast entries, which are left untouched.
 * Entries whose port list becomes empty are freed.  Always returns 0.
 */
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int ret, idx;

	for (idx = 0; idx < ale->params.ale_entries; idx++) {
		cpsw_ale_read(ale, idx, ale_entry);
		ret = cpsw_ale_get_entry_type(ale_entry);
		if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
			continue;
		if (cpsw_ale_get_mcast(ale_entry)) {
			u8 addr[6];

			cpsw_ale_get_addr(ale_entry, addr);
			if (!is_broadcast_ether_addr(addr))
				cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
		}
		/* written back even when unchanged; harmless but simple */
		cpsw_ale_write(ale, idx, ale_entry);
	}
	return 0;
}
/*
 * cpsw_ale_flush_ucast - free a unicast entry if its owning port is in
 * @port_mask; entries on other ports are left alone.
 */
static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
				 int port_mask)
{
	int port = cpsw_ale_get_port_num(ale_entry);

	if (BIT(port) & port_mask)
		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
}
/*
 * cpsw_ale_flush - purge the ports in @port_mask from every address
 * entry: multicast entries get those ports masked out (freed when
 * empty), unicast entries owned by one of the ports are freed.
 * Always returns 0.
 */
int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int ret, idx;

	for (idx = 0; idx < ale->params.ale_entries; idx++) {
		cpsw_ale_read(ale, idx, ale_entry);
		ret = cpsw_ale_get_entry_type(ale_entry);
		if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
			continue;
		if (cpsw_ale_get_mcast(ale_entry))
			cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
		else
			cpsw_ale_flush_ucast(ale, ale_entry, port_mask);
		cpsw_ale_write(ale, idx, ale_entry);
	}
	return 0;
}
/*
 * Mark an entry as VLAN-qualified (ALE_TYPE_VLAN_ADDR with @vid
 * stored) when ALE_VLAN is set in @flags, otherwise as a plain
 * address entry.
 */
static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
						int flags, u16 vid)
{
	if (!(flags & ALE_VLAN)) {
		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
		return;
	}
	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR);
	cpsw_ale_set_vlan_id(ale_entry, vid);
}
/*
 * cpsw_ale_add_ucast - add or update a persistent unicast entry for
 * @addr on @port.  ALE_VLAN in @flags qualifies the entry by @vid;
 * ALE_SECURE and ALE_BLOCKED set the corresponding entry bits.
 * Reuses an existing matching slot, else a free one, else recycles an
 * ageable entry.  Returns 0 on success or -ENOMEM when the table is
 * full.
 */
int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
		       int flags, u16 vid)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
	cpsw_ale_set_port_num(ale_entry, port);

	/* prefer updating an existing entry over consuming a new slot */
	idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
	if (idx < 0)
		idx = cpsw_ale_match_free(ale);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(ale);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(ale, idx, ale_entry);
	return 0;
}
/*
 * cpsw_ale_del_ucast - remove the unicast entry for @addr (qualified
 * by @vid when ALE_VLAN is set).  @port is accepted for API symmetry
 * but not needed to locate the entry.  Returns 0 or -ENOENT.
 */
int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
		       int flags, u16 vid)
{
	u32 free_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
	if (idx < 0)
		return -ENOENT;

	/* overwrite the slot with an all-zero FREE entry */
	cpsw_ale_set_entry_type(free_entry, ALE_TYPE_FREE);
	cpsw_ale_write(ale, idx, free_entry);
	return 0;
}
/*
 * cpsw_ale_add_mcast - add the ports in @port_mask to the multicast
 * entry for @addr (creating the entry if needed) and set its
 * forwarding state.  Existing ports in the entry's mask are retained.
 * NOTE(review): the super bit is driven by ALE_BLOCKED here — looks
 * like it should be a dedicated "super" flag; confirm against the TRM.
 * Returns 0 on success or -ENOMEM when the table is full.
 */
int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
		       int flags, u16 vid, int mcast_state)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx, mask;

	idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
	if (idx >= 0)
		cpsw_ale_read(ale, idx, ale_entry);

	cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
	cpsw_ale_set_mcast_state(ale_entry, mcast_state);

	/* merge the new ports into whatever the entry already had */
	mask = cpsw_ale_get_port_mask(ale_entry);
	port_mask |= mask;
	cpsw_ale_set_port_mask(ale_entry, port_mask);

	if (idx < 0)
		idx = cpsw_ale_match_free(ale);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(ale);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(ale, idx, ale_entry);
	return 0;
}
/*
 * cpsw_ale_del_mcast - shrink the multicast entry for @addr to exactly
 * @port_mask, or free the entry entirely when @port_mask is zero.
 * NOTE(review): returns -EINVAL on a missing entry while del_ucast
 * returns -ENOENT — asymmetry worth confirming with callers.
 */
int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
		       int flags, u16 vid)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
	if (idx < 0)
		return -EINVAL;

	cpsw_ale_read(ale, idx, ale_entry);

	if (port_mask)
		cpsw_ale_set_port_mask(ale_entry, port_mask);
	else
		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);

	cpsw_ale_write(ale, idx, ale_entry);
	return 0;
}
/*
 * cpsw_ale_add_vlan - create or update the VLAN entry for @vid with
 * the member, force-untag and registered/unregistered multicast flood
 * port masks.  Reuses an existing entry, else a free slot, else an
 * ageable one.  Returns 0 or -ENOMEM when the table is full.
 */
int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
		      int reg_mcast, int unreg_mcast)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	idx = cpsw_ale_match_vlan(ale, vid);
	if (idx >= 0)
		cpsw_ale_read(ale, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
	cpsw_ale_set_vlan_id(ale_entry, vid);
	cpsw_ale_set_vlan_untag_force(ale_entry, untag);
	cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast);
	cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
	cpsw_ale_set_vlan_member_list(ale_entry, port);

	if (idx < 0)
		idx = cpsw_ale_match_free(ale);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(ale);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(ale, idx, ale_entry);
	return 0;
}
/*
 * cpsw_ale_del_vlan - shrink the member list of the VLAN entry for
 * @vid to @port_mask, or free the entry when @port_mask is zero.
 * Returns 0 on success or -ENOENT when no entry matches.
 */
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
{
	u32 entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx = cpsw_ale_match_vlan(ale, vid);

	if (idx < 0)
		return -ENOENT;

	cpsw_ale_read(ale, idx, entry);

	if (port_mask)
		cpsw_ale_set_vlan_member_list(entry, port_mask);
	else
		cpsw_ale_set_entry_type(entry, ALE_TYPE_FREE);

	cpsw_ale_write(ale, idx, entry);
	return 0;
}
/*
 * ale_control_info - layout of one ALE control field.
 * @offset/@port_offset: register offset and per-port byte stride;
 * @shift/@port_shift:   bit position and per-port bit stride;
 * @bits:                field width in bits.
 */
struct ale_control_info {
	const char *name;
	int offset, port_offset;
	int shift, port_shift;
	int bits;
};
/*
 * Table describing where each ALE control knob lives: global controls
 * in ALE_CONTROL, per-port controls in ALE_PORTCTL (4-byte stride) and
 * the unknown-VLAN port masks packed into ALE_UNKNOWNVLAN.
 */
static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
	/* global single-bit controls in ALE_CONTROL */
	[ALE_ENABLE] = {
		.name		= "enable",
		.offset		= ALE_CONTROL,
		.port_offset	= 0,
		.shift		= 31,
		.port_shift	= 0,
		.bits		= 1,
	},
	[ALE_CLEAR] = {
		.name		= "clear",
		.offset		= ALE_CONTROL,
		.port_offset	= 0,
		.shift		= 30,
		.port_shift	= 0,
		.bits		= 1,
	},
	[ALE_AGEOUT] = {
		.name		= "ageout",
		.offset		= ALE_CONTROL,
		.port_offset	= 0,
		.shift		= 29,
		.port_shift	= 0,
		.bits		= 1,
	},
	[ALE_VLAN_NOLEARN] = {
		.name		= "vlan_nolearn",
		.offset		= ALE_CONTROL,
		.port_offset	= 0,
		.shift		= 7,
		.port_shift	= 0,
		.bits		= 1,
	},
	[ALE_NO_PORT_VLAN] = {
		.name		= "no_port_vlan",
		.offset		= ALE_CONTROL,
		.port_offset	= 0,
		.shift		= 6,
		.port_shift	= 0,
		.bits		= 1,
	},
	[ALE_OUI_DENY] = {
		.name		= "oui_deny",
		.offset		= ALE_CONTROL,
		.port_offset	= 0,
		.shift		= 5,
		.port_shift	= 0,
		.bits		= 1,
	},
	[ALE_BYPASS] = {
		.name		= "bypass",
		.offset		= ALE_CONTROL,
		.port_offset	= 0,
		.shift		= 4,
		.port_shift	= 0,
		.bits		= 1,
	},
	[ALE_RATE_LIMIT_TX] = {
		.name		= "rate_limit_tx",
		.offset		= ALE_CONTROL,
		.port_offset	= 0,
		.shift		= 3,
		.port_shift	= 0,
		.bits		= 1,
	},
	[ALE_VLAN_AWARE] = {
		.name		= "vlan_aware",
		.offset		= ALE_CONTROL,
		.port_offset	= 0,
		.shift		= 2,
		.port_shift	= 0,
		.bits		= 1,
	},
	[ALE_AUTH_ENABLE] = {
		.name		= "auth_enable",
		.offset		= ALE_CONTROL,
		.port_offset	= 0,
		.shift		= 1,
		.port_shift	= 0,
		.bits		= 1,
	},
	[ALE_RATE_LIMIT] = {
		.name		= "rate_limit",
		.offset		= ALE_CONTROL,
		.port_offset	= 0,
		.shift		= 0,
		.port_shift	= 0,
		.bits		= 1,
	},
	/* per-port controls in ALE_PORTCTL, one register per port */
	[ALE_PORT_STATE] = {
		.name		= "port_state",
		.offset		= ALE_PORTCTL,
		.port_offset	= 4,
		.shift		= 0,
		.port_shift	= 0,
		.bits		= 2,
	},
	[ALE_PORT_DROP_UNTAGGED] = {
		.name		= "drop_untagged",
		.offset		= ALE_PORTCTL,
		.port_offset	= 4,
		.shift		= 2,
		.port_shift	= 0,
		.bits		= 1,
	},
	[ALE_PORT_DROP_UNKNOWN_VLAN] = {
		.name		= "drop_unknown",
		.offset		= ALE_PORTCTL,
		.port_offset	= 4,
		.shift		= 3,
		.port_shift	= 0,
		.bits		= 1,
	},
	[ALE_PORT_NOLEARN] = {
		.name		= "nolearn",
		.offset		= ALE_PORTCTL,
		.port_offset	= 4,
		.shift		= 4,
		.port_shift	= 0,
		.bits		= 1,
	},
	[ALE_PORT_MCAST_LIMIT] = {
		.name		= "mcast_limit",
		.offset		= ALE_PORTCTL,
		.port_offset	= 4,
		.shift		= 16,
		.port_shift	= 0,
		.bits		= 8,
	},
	[ALE_PORT_BCAST_LIMIT] = {
		.name		= "bcast_limit",
		.offset		= ALE_PORTCTL,
		.port_offset	= 4,
		.shift		= 24,
		.port_shift	= 0,
		.bits		= 8,
	},
	/* port-mask fields packed into the single ALE_UNKNOWNVLAN register */
	[ALE_PORT_UNKNOWN_VLAN_MEMBER] = {
		.name		= "unknown_vlan_member",
		.offset		= ALE_UNKNOWNVLAN,
		.port_offset	= 0,
		.shift		= 0,
		.port_shift	= 0,
		.bits		= 6,
	},
	[ALE_PORT_UNKNOWN_MCAST_FLOOD] = {
		.name		= "unknown_mcast_flood",
		.offset		= ALE_UNKNOWNVLAN,
		.port_offset	= 0,
		.shift		= 8,
		.port_shift	= 0,
		.bits		= 6,
	},
	[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD] = {
		.name		= "unknown_reg_flood",
		.offset		= ALE_UNKNOWNVLAN,
		.port_offset	= 0,
		.shift		= 16,
		.port_shift	= 0,
		.bits		= 6,
	},
	[ALE_PORT_UNTAGGED_EGRESS] = {
		.name		= "untagged_egress",
		.offset		= ALE_UNKNOWNVLAN,
		.port_offset	= 0,
		.shift		= 24,
		.port_shift	= 0,
		.bits		= 6,
	},
};
/*
 * cpsw_ale_control_set - write @value into ALE control field @control
 * for @port (ignored for global controls).  Returns 0, or -EINVAL on a
 * bad control index, port number or out-of-range value.
 */
int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
			 int value)
{
	const struct ale_control_info *info;
	int offset, shift;
	u32 reg, mask;

	if (control < 0 || control >= ARRAY_SIZE(ale_controls))
		return -EINVAL;

	info = &ale_controls[control];
	if (info->port_offset == 0 && info->port_shift == 0)
		port = 0;	/* global control: port is a don't-care */

	if (port < 0 || port > ale->params.ale_ports)
		return -EINVAL;

	mask = BITMASK(info->bits);
	if (value & ~mask)
		return -EINVAL;

	offset = info->offset + port * info->port_offset;
	shift = info->shift + port * info->port_shift;

	reg = __raw_readl(ale->params.ale_regs + offset);
	reg &= ~(mask << shift);
	reg |= value << shift;
	__raw_writel(reg, ale->params.ale_regs + offset);

	return 0;
}
/*
 * cpsw_ale_control_get - read ALE control field @control for @port
 * (ignored for global controls).  Returns the field value, or -EINVAL
 * on a bad control index or port number.
 */
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
{
	const struct ale_control_info *info;
	int offset, shift;
	u32 reg;

	if (control < 0 || control >= ARRAY_SIZE(ale_controls))
		return -EINVAL;

	info = &ale_controls[control];
	if (info->port_offset == 0 && info->port_shift == 0)
		port = 0;	/* global control: port is a don't-care */

	if (port < 0 || port > ale->params.ale_ports)
		return -EINVAL;

	offset = info->offset + port * info->port_offset;
	shift = info->shift + port * info->port_shift;

	reg = __raw_readl(ale->params.ale_regs + offset) >> shift;
	return reg & BITMASK(info->bits);
}
/*
 * cpsw_ale_timer - periodic ageing tick: request a hardware age-out
 * pass, then re-arm while an ageout interval is configured.
 */
static void cpsw_ale_timer(unsigned long arg)
{
	struct cpsw_ale *ale = (struct cpsw_ale *)arg;

	cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
	if (ale->ageout) {
		ale->timer.expires = jiffies + ale->ageout;
		add_timer(&ale->timer);
	}
}
/*
 * cpsw_ale_set_ageout - change the ageing interval to @ageout seconds;
 * 0 stops the periodic age-out.  Always returns 0.
 */
int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout)
{
	del_timer_sync(&ale->timer);
	ale->ageout = ageout * HZ;
	if (ale->ageout) {
		ale->timer.expires = jiffies + ale->ageout;
		add_timer(&ale->timer);
	}
	return 0;
}
/*
 * cpsw_ale_start - enable the ALE, clear its address table and start
 * the periodic ageing timer (when an ageout interval is configured).
 */
void cpsw_ale_start(struct cpsw_ale *ale)
{
	u32 rev;

	rev = __raw_readl(ale->params.ale_regs + ALE_IDVER);
	dev_dbg(ale->params.dev, "initialized cpsw ale revision %d.%d\n",
		ALE_VERSION_MAJOR(rev), ALE_VERSION_MINOR(rev));
	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
	cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);

	/* setup_timer() replaces the open-coded init_timer() + field writes */
	setup_timer(&ale->timer, cpsw_ale_timer, (unsigned long)ale);
	if (ale->ageout) {
		ale->timer.expires = jiffies + ale->ageout;
		add_timer(&ale->timer);
	}
}
/* cpsw_ale_stop - cancel the ageing timer, waiting for a running tick. */
void cpsw_ale_stop(struct cpsw_ale *ale)
{
	del_timer_sync(&ale->timer);
}
/*
 * cpsw_ale_create - allocate an ALE instance and seed it from @params.
 * Returns the new instance or NULL on allocation failure.
 */
struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
{
	struct cpsw_ale *ale = kzalloc(sizeof(*ale), GFP_KERNEL);

	if (ale) {
		ale->params = *params;
		ale->ageout = ale->params.ale_ageout * HZ;
	}
	return ale;
}
/*
 * cpsw_ale_destroy - stop ageing, disable the ALE and free the
 * instance.  Returns 0, or -EINVAL when called with NULL.
 */
int cpsw_ale_destroy(struct cpsw_ale *ale)
{
	if (!ale)
		return -EINVAL;
	cpsw_ale_stop(ale);
	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
	kfree(ale);
	return 0;
}
| gpl-2.0 |
PKUCloud/linux-3.11-cloud | drivers/gpu/drm/drm_scatter.c | 2371 | 5259 | /**
* \file drm_scatter.c
* IOCTLs to manage scatter/gather memory
*
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
*
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#define DEBUG_SCATTER 0

/*
 * drm_vmalloc_dma - allocate virtually contiguous, DMA-suitable memory.
 * On non-cache-coherent PowerPC the mapping must be uncached; everywhere
 * else a 32-bit-addressable vmalloc suffices.
 */
static inline void *drm_vmalloc_dma(unsigned long size)
{
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
#else
	return vmalloc_32(size);
#endif
}
/*
 * drm_sg_cleanup - release a scatter/gather allocation: unreserve each
 * mapped page, then free the backing vmalloc area, the bookkeeping
 * arrays and the entry itself.
 */
void drm_sg_cleanup(struct drm_sg_mem * entry)
{
	int i;

	for (i = 0; i < entry->pages; i++) {
		struct page *page = entry->pagelist[i];

		if (page)
			ClearPageReserved(page);
	}

	vfree(entry->virtual);
	kfree(entry->busaddr);
	kfree(entry->pagelist);
	kfree(entry);
}
/* Fold a kernel virtual address into a 32-bit handle.  On 64-bit
 * builds the two halves are summed so the result fits an unsigned int. */
#ifdef _LP64
# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
#else
# define ScatterHandle(x) (unsigned int)(x)
#endif
/*
 * drm_sg_alloc - allocate scatter/gather memory for @dev and report the
 * handle back through @request.  Allocates a page-aligned vmalloc area,
 * builds a page list for it and stores the result in dev->sg (only one
 * allocation may exist at a time).  Returns 0 or a negative errno.
 */
int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
{
	struct drm_sg_mem *entry;
	unsigned long pages, i, j;

	DRM_DEBUG("\n");

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	/* only a single scatter/gather allocation per device */
	if (dev->sg)
		return -EINVAL;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	/* round the request up to whole pages */
	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
	DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);

	entry->pages = pages;
	entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
	if (!entry->pagelist) {
		kfree(entry);
		return -ENOMEM;
	}

	entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
	if (!entry->busaddr) {
		kfree(entry->pagelist);
		kfree(entry);
		return -ENOMEM;
	}

	entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
	if (!entry->virtual) {
		kfree(entry->busaddr);
		kfree(entry->pagelist);
		kfree(entry);
		return -ENOMEM;
	}

	/* This also forces the mapping of COW pages, so our page list
	 * will be valid.  Please don't remove it...
	 */
	memset(entry->virtual, 0, pages << PAGE_SHIFT);

	entry->handle = ScatterHandle((unsigned long)entry->virtual);

	DRM_DEBUG("handle  = %08lx\n", entry->handle);
	DRM_DEBUG("virtual = %p\n", entry->virtual);

	/* reserve each backing page so it is pinned for DMA use */
	for (i = (unsigned long)entry->virtual, j = 0; j < pages;
	     i += PAGE_SIZE, j++) {
		entry->pagelist[j] = vmalloc_to_page((void *)i);
		if (!entry->pagelist[j])
			goto failed;
		SetPageReserved(entry->pagelist[j]);
	}

	request->handle = entry->handle;

	dev->sg = entry;

#if DEBUG_SCATTER
	/* Verify that each page points to its virtual address, and vice
	 * versa.
	 */
	{
		int error = 0;

		for (i = 0; i < pages; i++) {
			unsigned long *tmp;

			tmp = page_address(entry->pagelist[i]);
			for (j = 0;
			     j < PAGE_SIZE / sizeof(unsigned long);
			     j++, tmp++) {
				*tmp = 0xcafebabe;
			}
			tmp = (unsigned long *)((u8 *) entry->virtual +
						(PAGE_SIZE * i));
			for (j = 0;
			     j < PAGE_SIZE / sizeof(unsigned long);
			     j++, tmp++) {
				if (*tmp != 0xcafebabe && error == 0) {
					error = 1;
					DRM_ERROR("Scatter allocation error, "
						  "pagelist does not match "
						  "virtual mapping\n");
				}
			}
			tmp = page_address(entry->pagelist[i]);
			for (j = 0;
			     j < PAGE_SIZE / sizeof(unsigned long);
			     j++, tmp++) {
				*tmp = 0;
			}
		}
		if (error == 0)
			DRM_ERROR("Scatter allocation matches pagelist\n");
	}
#endif

	return 0;

failed:
	drm_sg_cleanup(entry);
	return -ENOMEM;
}
/* IOCTL wrapper: hand the user's request straight to drm_sg_alloc(). */
int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_scatter_gather *request = data;

	return drm_sg_alloc(dev, request);
}
/*
 * drm_sg_free - IOCTL: release the device's scatter/gather memory.
 * The handle supplied by userspace must match the current allocation;
 * dev->sg is cleared before validation.  Returns 0 or -EINVAL.
 */
int drm_sg_free(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_scatter_gather *request = data;
	struct drm_sg_mem *entry;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	entry = dev->sg;
	dev->sg = NULL;

	if (!entry || entry->handle != request->handle)
		return -EINVAL;

	DRM_DEBUG("virtual  = %p\n", entry->virtual);

	drm_sg_cleanup(entry);
	return 0;
}
| gpl-2.0 |
AospPlus/android_kernel_htc_enrc2b-old | arch/x86/kernel/topology.c | 2883 | 2198 | /*
* Populate sysfs with topology information
*
* Written by: Matthew Dobson, IBM Corporation
* Original Code: Paul Dorwin, IBM Corporation, Patrick Mochel, OSDL
*
* Copyright (C) 2002, IBM Corp.
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <colpatch@us.ibm.com>
*/
#include <linux/nodemask.h>
#include <linux/mmzone.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/cpu.h>
/* Per-CPU sysfs device objects registered below. */
static DEFINE_PER_CPU(struct x86_cpu, cpu_devices);

#ifdef CONFIG_HOTPLUG_CPU
/*
 * arch_register_cpu - register CPU @num with the sysfs CPU subsystem.
 * Every CPU except the boot CPU (num == 0) is marked hotpluggable.
 */
int __ref arch_register_cpu(int num)
{
	/*
	 * CPU0 cannot be offlined due to several
	 * restrictions and assumptions in kernel. This basically
	 * doesn't add a control file, one cannot attempt to offline
	 * BSP.
	 *
	 * Also certain PCI quirks require not to enable hotplug control
	 * for all CPU's.
	 */
	if (num)
		per_cpu(cpu_devices, num).cpu.hotpluggable = 1;
	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
}
EXPORT_SYMBOL(arch_register_cpu);

/* arch_unregister_cpu - remove CPU @num from sysfs on hot-unplug. */
void arch_unregister_cpu(int num)
{
	unregister_cpu(&per_cpu(cpu_devices, num).cpu);
}
EXPORT_SYMBOL(arch_unregister_cpu);
#else /* CONFIG_HOTPLUG_CPU */
/* Without hotplug support, CPUs register as non-hotpluggable. */
static int __init arch_register_cpu(int num)
{
	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * topology_init - populate sysfs with NUMA node and CPU topology
 * objects for every online node and present CPU at boot.
 */
static int __init topology_init(void)
{
	int i;

#ifdef CONFIG_NUMA
	for_each_online_node(i)
		register_one_node(i);
#endif

	for_each_present_cpu(i)
		arch_register_cpu(i);
	return 0;
}
subsys_initcall(topology_init);
| gpl-2.0 |
rooque/android_kernel_xiaomi_cancro | drivers/net/wireless/libra/qcomwlan7x27a_pwrif.c | 3395 | 5467 | /* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/device.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
#include <mach/rpc_pmapp.h>
#include <linux/err.h>
#include <linux/qcomwlan7x27a_pwrif.h>
#include <linux/module.h>
/* GPIO driving the WLAN chip's external power-on-reset line. */
#define WLAN_GPIO_EXT_POR_N	134

/* Client id used when voting for PMIC clocks. */
static const char *id = "WLAN";

/* Indices into vreg_info[] below. */
enum {
	WLAN_VREG_L17 = 0,
	WLAN_VREG_S3,
	WLAN_VREG_TCXO_L11,
	WLAN_VREG_L19,
	WLAN_VREG_L5,
	WLAN_VREG_L6
};

/*
 * wlan_vreg_info - one WLAN supply rail.
 * @vreg_id: regulator supply name; @level_min/@level_max: voltage
 * window in uV; @pmapp_id: PMIC application id for pin-control votes;
 * @is_vreg_pin_controlled: whether the rail is gated by a PMIC pin;
 * @reg: consumer handle filled in by qrf6285_init_regs().
 */
struct wlan_vreg_info {
	const char *vreg_id;
	unsigned int level_min;
	unsigned int level_max;
	unsigned int pmapp_id;
	unsigned int is_vreg_pin_controlled;
	struct regulator *reg;
};

static struct wlan_vreg_info vreg_info[] = {
	{"bt", 3050000, 3050000, 21, 1, NULL},
	{"msme1", 1800000, 1800000, 2, 0, NULL},
	{"wlan_tcx0", 1800000, 1800000, 53, 0, NULL},
	{"wlan4", 1200000, 1200000, 23, 0, NULL},
	{"wlan2", 1350000, 1350000, 9, 1, NULL},
	{"wlan3", 1200000, 1200000, 10, 1, NULL},
};
/*
 * qrf6285_init_regs() - look up all WLAN supplies in one shot.
 *
 * Builds a regulator_bulk_data request from the vreg_info[] table,
 * fetches every consumer with regulator_bulk_get(), and caches the
 * resulting handles back into vreg_info[].reg for later use.
 *
 * Returns 0 on success or the regulator_bulk_get() error code.
 */
static int qrf6285_init_regs(void)
{
	struct regulator_bulk_data bulk[ARRAY_SIZE(vreg_info)];
	int rc;
	int n;

	for (n = 0; n < ARRAY_SIZE(bulk); n++) {
		bulk[n].supply = vreg_info[n].vreg_id;
		bulk[n].min_uV = vreg_info[n].level_min;
		bulk[n].max_uV = vreg_info[n].level_max;
	}

	rc = regulator_bulk_get(NULL, ARRAY_SIZE(bulk), bulk);
	if (rc) {
		pr_err("%s: could not get regulators: %d\n", __func__, rc);
		return rc;
	}

	for (n = 0; n < ARRAY_SIZE(bulk); n++)
		vreg_info[n].reg = bulk[n].consumer;

	return 0;
}
/*
 * chip_power_qrf6285() - power the QRF6285 WLAN chip on or off.
 * @on: true = power up, false = power down.
 *
 * Power-up: drive the external POR GPIO high, vote the A0 clock
 * always-on, then set voltage / enable / pin-control-vote each supply
 * in vreg_info[] order; after the last supply (WLAN_VREG_L6) the A0
 * vote is switched to pin control so the clock follows
 * WLAN_CLK_PWR_REQ. Power-down reverses this best-effort (failures
 * are logged but not fatal).
 *
 * Returns 0 on success or a negative errno from the first fatal step.
 */
int chip_power_qrf6285(bool on)
{
static bool init_done;
int rc = 0, index = 0;
/* Lazily fetch regulator handles on first use; retried next call
 * if this fails. */
if (unlikely(!init_done)) {
rc = qrf6285_init_regs();
if (rc)
return rc;
else
init_done = true;
}
if (on) {
rc = gpio_request(WLAN_GPIO_EXT_POR_N, "WLAN_DEEP_SLEEP_N");
if (rc) {
pr_err("WLAN reset GPIO %d request failed %d\n",
WLAN_GPIO_EXT_POR_N, rc);
goto fail;
}
rc = gpio_direction_output(WLAN_GPIO_EXT_POR_N, 1);
if (rc < 0) {
pr_err("WLAN reset GPIO %d set direction failed %d\n",
WLAN_GPIO_EXT_POR_N, rc);
goto fail_gpio_dir_out;
}
/* Keep A0 unconditionally on while the supplies ramp. */
rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_A0,
PMAPP_CLOCK_VOTE_ON);
if (rc) {
pr_err("%s: Configuring A0 to always"
" on failed %d\n", __func__, rc);
goto clock_vote_fail;
}
} else {
gpio_set_value_cansleep(WLAN_GPIO_EXT_POR_N, 0);
rc = gpio_direction_input(WLAN_GPIO_EXT_POR_N);
if (rc) {
pr_err("WLAN reset GPIO %d set direction failed %d\n",
WLAN_GPIO_EXT_POR_N, rc);
}
gpio_free(WLAN_GPIO_EXT_POR_N);
rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_A0,
PMAPP_CLOCK_VOTE_OFF);
if (rc) {
pr_err("%s: Configuring A0 to turn OFF"
" failed %d\n", __func__, rc);
}
}
/* Walk every supply: program+enable on power-up, disable on
 * power-down. */
for (index = 0; index < ARRAY_SIZE(vreg_info); index++) {
if (on) {
rc = regulator_set_voltage(vreg_info[index].reg,
vreg_info[index].level_min,
vreg_info[index].level_max);
if (rc) {
pr_err("%s:%s set voltage failed %d\n",
__func__, vreg_info[index].vreg_id, rc);
goto vreg_fail;
}
rc = regulator_enable(vreg_info[index].reg);
if (rc) {
pr_err("%s:%s vreg enable failed %d\n",
__func__, vreg_info[index].vreg_id, rc);
goto vreg_fail;
}
if (vreg_info[index].is_vreg_pin_controlled) {
rc = pmapp_vreg_lpm_pincntrl_vote(id,
vreg_info[index].pmapp_id,
PMAPP_CLOCK_ID_A0, 1);
if (rc) {
pr_err("%s:%s pmapp_vreg_lpm_pincntrl"
" for enable failed %d\n",
__func__,
vreg_info[index].vreg_id, rc);
goto vreg_clock_vote_fail;
}
}
/*At this point CLK_PWR_REQ is high*/
if (WLAN_VREG_L6 == index) {
/*
* Configure A0 clock to be slave to
* WLAN_CLK_PWR_REQ
*/
rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_A0,
PMAPP_CLOCK_VOTE_PIN_CTRL);
if (rc) {
pr_err("%s: Configuring A0 to Pin"
" controllable failed %d\n",
__func__, rc);
goto vreg_clock_vote_fail;
}
}
} else {
if (vreg_info[index].is_vreg_pin_controlled) {
rc = pmapp_vreg_lpm_pincntrl_vote(id,
vreg_info[index].pmapp_id,
PMAPP_CLOCK_ID_A0, 0);
if (rc) {
pr_err("%s:%s pmapp_vreg_lpm_pincntrl"
" for disable failed %d\n",
__func__,
vreg_info[index].vreg_id, rc);
}
}
rc = regulator_disable(vreg_info[index].reg);
if (rc) {
pr_err("%s:%s vreg disable failed %d\n",
__func__, vreg_info[index].vreg_id, rc);
}
}
}
return 0;
/* Unwind path: disable every supply enabled so far. vreg_fail is
 * entered with 'index' at the supply that failed (not yet enabled),
 * so it is skipped via the decrement. */
vreg_fail:
index--;
vreg_clock_vote_fail:
while (index >= 0) {
rc = regulator_disable(vreg_info[index].reg);
if (rc) {
pr_err("%s:%s vreg disable failed %d\n",
__func__, vreg_info[index].vreg_id, rc);
}
index--;
}
/* On power-down the GPIO/clock were already released above. */
if (!on)
goto fail;
clock_vote_fail:
gpio_set_value_cansleep(WLAN_GPIO_EXT_POR_N, 0);
rc = gpio_direction_input(WLAN_GPIO_EXT_POR_N);
if (rc) {
pr_err("WLAN reset GPIO %d set direction failed %d\n",
WLAN_GPIO_EXT_POR_N, rc);
}
fail_gpio_dir_out:
gpio_free(WLAN_GPIO_EXT_POR_N);
fail:
return rc;
}
EXPORT_SYMBOL(chip_power_qrf6285);
| gpl-2.0 |
TV-LP51-Devices/kernel_sony_msm8974 | arch/arm/mach-msm/qdsp6v2/snddev_icodec.c | 3395 | 28479 | /* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/mfd/msm-adie-codec.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/wakelock.h>
#include <linux/pmic8058-othc.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/moduleparam.h>
#include <linux/pm_qos.h>
#include <asm/uaccess.h>
#include <mach/qdsp6v2/audio_dev_ctl.h>
#include <mach/qdsp6v2/audio_acdb.h>
#include <mach/vreg.h>
#include <mach/pmic.h>
#include <mach/debug_mm.h>
#include <mach/cpuidle.h>
#include <sound/q6afe.h>
#include <sound/apr_audio.h>
#include "snddev_icodec.h"
#define SNDDEV_ICODEC_PCM_SZ 32 /* 16 bit / sample stereo mode */
#define SNDDEV_ICODEC_MUL_FACTOR 3 /* Multi by 8 Shift by 3 */
#define SNDDEV_ICODEC_CLK_RATE(freq) \
(((freq) * (SNDDEV_ICODEC_PCM_SZ)) << (SNDDEV_ICODEC_MUL_FACTOR))
#define SNDDEV_LOW_POWER_MODE 0
#define SNDDEV_HIGH_POWER_MODE 1
/* Voltage required for S4 in microVolts, 2.2V or 2200000microvolts */
#define SNDDEV_VREG_8058_S4_VOLTAGE (2200000)
/* Load Current required for S4 in microAmps,
36mA - 56mA */
#define SNDDEV_VREG_LOW_POWER_LOAD (36000)
#define SNDDEV_VREG_HIGH_POWER_LOAD (56000)
bool msm_codec_i2s_slave_mode;
/* Context for each internal codec sound device */
struct snddev_icodec_state {
struct snddev_icodec_data *data;
struct adie_codec_path *adie_path;
u32 sample_rate;
u32 enabled;
};
/* Global state for the driver */
struct snddev_icodec_drv_state {
struct mutex rx_lock;
struct mutex lb_lock;
struct mutex tx_lock;
u32 rx_active; /* ensure one rx device at a time */
u32 tx_active; /* ensure one tx device at a time */
struct clk *rx_osrclk;
struct clk *rx_bitclk;
struct clk *tx_osrclk;
struct clk *tx_bitclk;
struct pm_qos_request rx_pm_qos_req;
struct pm_qos_request tx_pm_qos_req;
/* handle to pmic8058 regulator smps4 */
struct regulator *snddev_vreg;
};
static struct snddev_icodec_drv_state snddev_icodec_drv;
/*
 * vreg_init() - acquire and program the PMIC8058 S4 supply.
 *
 * Looks up the "8058_s4" regulator and pins its voltage at
 * SNDDEV_VREG_8058_S4_VOLTAGE. Returns the regulator handle, or NULL
 * when either the lookup or the voltage request fails.
 */
struct regulator *vreg_init(void)
{
	struct regulator *reg;

	reg = regulator_get(NULL, "8058_s4");
	if (IS_ERR(reg)) {
		pr_err("%s: regulator_get 8058_s4 failed\n", __func__);
		return NULL;
	}

	if (regulator_set_voltage(reg, SNDDEV_VREG_8058_S4_VOLTAGE,
				  SNDDEV_VREG_8058_S4_VOLTAGE) != 0)
		return NULL;

	return reg;
}
/* Release the supply handle obtained by vreg_init(). */
static void vreg_deinit(struct regulator *vreg)
{
regulator_put(vreg);
}
/*
 * vreg_mode_vote() - enable/disable the codec supply and vote its load.
 * @vreg:   regulator handle from vreg_init()
 * @enable: nonzero to enable, zero to disable
 * @mode:   SNDDEV_HIGH_POWER_MODE or SNDDEV_LOW_POWER_MODE; picks the
 *          optimum-mode load current while the supply is on
 *
 * Failures are logged only; there is no return value.
 */
static void vreg_mode_vote(struct regulator *vreg, int enable, int mode)
{
	int err;

	if (!enable) {
		err = regulator_disable(vreg);
		if (err != 0)
			pr_err("%s:Disabling regulator failed\n", __func__);
		return;
	}

	err = regulator_enable(vreg);
	if (err != 0) {
		pr_err("%s:Enabling regulator failed\n", __func__);
		return;
	}

	regulator_set_optimum_mode(vreg, mode ?
			SNDDEV_VREG_HIGH_POWER_LOAD :
			SNDDEV_VREG_LOW_POWER_LOAD);
}
/* Book-keeping for the codec master-clock GPIOs provided by the
 * msm_cdcclk_ctl platform device. The *_requested flags record whether
 * each GPIO is currently claimed so the free helpers are idempotent. */
struct msm_cdcclk_ctl_state {
unsigned int rx_mclk;
unsigned int rx_mclk_requested;
unsigned int tx_mclk;
unsigned int tx_mclk_requested;
};
static struct msm_cdcclk_ctl_state the_msm_cdcclk_ctl_state;
/*
 * msm_snddev_rx_mclk_request() - claim the RX codec master-clock GPIO
 * and mark it requested so msm_snddev_rx_mclk_free() will release it.
 * Returns 0 on success or the gpio_request() error.
 */
static int msm_snddev_rx_mclk_request(void)
{
	int ret;

	ret = gpio_request(the_msm_cdcclk_ctl_state.rx_mclk,
			   "MSM_SNDDEV_RX_MCLK");
	if (ret < 0) {
		pr_err("%s: GPIO request for MSM SNDDEV RX failed\n", __func__);
		return ret;
	}

	the_msm_cdcclk_ctl_state.rx_mclk_requested = 1;
	return 0;
}
/*
 * msm_snddev_tx_mclk_request() - claim the TX codec master-clock GPIO
 * and mark it requested so msm_snddev_tx_mclk_free() will release it.
 * Returns 0 on success or the gpio_request() error.
 */
static int msm_snddev_tx_mclk_request(void)
{
	int ret;

	ret = gpio_request(the_msm_cdcclk_ctl_state.tx_mclk,
			   "MSM_SNDDEV_TX_MCLK");
	if (ret < 0) {
		pr_err("%s: GPIO request for MSM SNDDEV TX failed\n", __func__);
		return ret;
	}

	the_msm_cdcclk_ctl_state.tx_mclk_requested = 1;
	return 0;
}
/* Release the RX MCLK GPIO if it is currently claimed; safe to call
 * even when it is not (no-op in that case). */
static void msm_snddev_rx_mclk_free(void)
{
	if (!the_msm_cdcclk_ctl_state.rx_mclk_requested)
		return;

	gpio_free(the_msm_cdcclk_ctl_state.rx_mclk);
	the_msm_cdcclk_ctl_state.rx_mclk_requested = 0;
}
/* Release the TX MCLK GPIO if it is currently claimed; safe to call
 * even when it is not (no-op in that case). */
static void msm_snddev_tx_mclk_free(void)
{
	if (!the_msm_cdcclk_ctl_state.tx_mclk_requested)
		return;

	gpio_free(the_msm_cdcclk_ctl_state.tx_mclk);
	the_msm_cdcclk_ctl_state.tx_mclk_requested = 0;
}
/*
 * get_msm_cdcclk_ctl_gpios() - read the RX/TX MCLK GPIO numbers from
 * the platform device's IO resources and cache them in
 * the_msm_cdcclk_ctl_state. The GPIOs are only recorded here; they are
 * claimed later, per direction, by the *_mclk_request() helpers.
 * Returns 0, or -ENODEV when a resource is missing.
 */
static int get_msm_cdcclk_ctl_gpios(struct platform_device *pdev)
{
int rc = 0;
struct resource *res;
/* Claim all of the GPIOs. */
res = platform_get_resource_byname(pdev, IORESOURCE_IO,
"msm_snddev_rx_mclk");
if (!res) {
pr_err("%s: failed to get gpio MSM SNDDEV RX\n", __func__);
return -ENODEV;
}
the_msm_cdcclk_ctl_state.rx_mclk = res->start;
the_msm_cdcclk_ctl_state.rx_mclk_requested = 0;
res = platform_get_resource_byname(pdev, IORESOURCE_IO,
"msm_snddev_tx_mclk");
if (!res) {
pr_err("%s: failed to get gpio MSM SNDDEV TX\n", __func__);
return -ENODEV;
}
the_msm_cdcclk_ctl_state.tx_mclk = res->start;
the_msm_cdcclk_ctl_state.tx_mclk_requested = 0;
return rc;
}
/*
 * msm_cdcclk_ctl_probe() - probe hook; cache the MCLK GPIO numbers
 * from the platform resources. Returns 0 or -ENODEV.
 */
static int msm_cdcclk_ctl_probe(struct platform_device *pdev)
{
	if (get_msm_cdcclk_ctl_gpios(pdev) < 0) {
		pr_err("%s: GPIO configuration failed\n", __func__);
		return -ENODEV;
	}
	return 0;
}
/* Platform driver whose only job is to fetch the MCLK GPIO resources. */
static struct platform_driver msm_cdcclk_ctl_driver = {
.probe = msm_cdcclk_ctl_probe,
.driver = { .name = "msm_cdcclk_ctl"}
};
/*
 * snddev_icodec_open_lb() - bring up the analog loopback path.
 * Votes the supply at low power (sufficient for all loopback cases),
 * powers the codec, opens and programs the ADIE path, then enables
 * the PA. Always returns 0; an ADIE open failure is only logged
 * (adie_path stays NULL and the proceed step is skipped).
 */
static int snddev_icodec_open_lb(struct snddev_icodec_state *icodec)
{
int trc;
struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
/* Voting for low power is ok here as all use cases are
* supported in low power mode.
*/
if (drv->snddev_vreg)
vreg_mode_vote(drv->snddev_vreg, 1,
SNDDEV_LOW_POWER_MODE);
if (icodec->data->voltage_on)
icodec->data->voltage_on();
trc = adie_codec_open(icodec->data->profile, &icodec->adie_path);
if (IS_ERR_VALUE(trc))
pr_err("%s: adie codec open failed\n", __func__);
else
adie_codec_setpath(icodec->adie_path,
icodec->sample_rate, 256);
if (icodec->adie_path)
adie_codec_proceed_stage(icodec->adie_path,
ADIE_CODEC_DIGITAL_ANALOG_READY);
if (icodec->data->pamp_on)
icodec->data->pamp_on();
icodec->enabled = 1;
return 0;
}
/*
 * initialize_msm_icodec_gpios() - drive each codec GPIO resource to
 * its board-supplied default level.
 *
 * platform_data is an int array parallel to the IO resources holding
 * the default output value per GPIO. Each GPIO is requested, driven,
 * and immediately freed: the driver only latches the defaults, it does
 * not keep the lines claimed. Returns 0 or a gpio_request() error.
 */
static int initialize_msm_icodec_gpios(struct platform_device *pdev)
{
int rc = 0;
struct resource *res;
int i = 0;
int *reg_defaults = pdev->dev.platform_data;
while ((res = platform_get_resource(pdev, IORESOURCE_IO, i))) {
rc = gpio_request(res->start, res->name);
if (rc) {
pr_err("%s: icodec gpio %d request failed\n", __func__,
res->start);
goto err;
} else {
/* This platform data structure only works if all gpio
* resources are to be used only in output mode.
* If gpio resources are added which are to be used in
* input mode, then the platform data structure will
* have to be changed.
*/
gpio_direction_output(res->start, reg_defaults[i]);
gpio_free(res->start);
}
i++;
}
err:
return rc;
}
/*
 * msm_icodec_gpio_probe() - probe hook; push the board default levels
 * onto the codec GPIOs. Returns 0 or -ENODEV.
 */
static int msm_icodec_gpio_probe(struct platform_device *pdev)
{
	if (initialize_msm_icodec_gpios(pdev) < 0) {
		pr_err("%s: GPIO configuration failed\n", __func__);
		return -ENODEV;
	}
	return 0;
}
/* Platform driver that programs codec GPIO default levels at probe. */
static struct platform_driver msm_icodec_gpio_driver = {
.probe = msm_icodec_gpio_probe,
.driver = { .name = "msm_icodec_gpio"}
};
/*
 * snddev_icodec_open_rx() - bring up the internal codec RX (playback)
 * path.
 *
 * Sequence: block deep idle, vote the codec supply (low power only for
 * "headset_stereo_rx"), claim the RX MCLK GPIO, configure and enable
 * the OSR and bit clocks (bit-clock rate 0 selects slave mode),
 * program the ADIE path, open the AFE port, step the ADIE to
 * analog-ready and finally turn on the power amplifier.
 *
 * Bug fixes versus the previous version: the bit clock is now disabled
 * when pamp_on() fails, and the MCLK GPIO is freed on every error path
 * (it was leaked, which made a retry's gpio_request() fail).
 *
 * Returns 0 on success, -ENODEV on failure.
 */
static int snddev_icodec_open_rx(struct snddev_icodec_state *icodec)
{
	int trc;
	int afe_channel_mode;
	union afe_port_config afe_config;
	struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;

	/* Keep the CPU out of deep idle while clocks are sequenced. */
	pm_qos_update_request(&drv->rx_pm_qos_req,
			msm_cpuidle_get_deep_idle_latency());

	if (drv->snddev_vreg) {
		/* Headset stereo playback is supported in low power. */
		if (!strcmp(icodec->data->name, "headset_stereo_rx"))
			vreg_mode_vote(drv->snddev_vreg, 1,
					SNDDEV_LOW_POWER_MODE);
		else
			vreg_mode_vote(drv->snddev_vreg, 1,
					SNDDEV_HIGH_POWER_MODE);
	}
	msm_snddev_rx_mclk_request();

	drv->rx_osrclk = clk_get_sys(NULL, "i2s_spkr_osr_clk");
	if (IS_ERR(drv->rx_osrclk))
		pr_err("%s master clock Error\n", __func__);

	trc = clk_set_rate(drv->rx_osrclk,
			SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate));
	if (IS_ERR_VALUE(trc)) {
		pr_err("ERROR setting m clock1\n");
		goto error_invalid_freq;
	}
	clk_prepare_enable(drv->rx_osrclk);

	drv->rx_bitclk = clk_get_sys(NULL, "i2s_spkr_bit_clk");
	if (IS_ERR(drv->rx_bitclk))
		pr_err("%s clock Error\n", __func__);

	/* Master clock = Sample Rate * OSR rate bit clock
	 * OSR Rate bit clock = bit/sample * channel master
	 * clock / bit clock = divider value = 8
	 */
	if (msm_codec_i2s_slave_mode) {
		pr_info("%s: configuring bit clock for slave mode\n",
				__func__);
		trc = clk_set_rate(drv->rx_bitclk, 0);
	} else
		trc = clk_set_rate(drv->rx_bitclk, 8);
	if (IS_ERR_VALUE(trc)) {
		pr_err("ERROR setting m clock1\n");
		goto error_adie;
	}
	clk_prepare_enable(drv->rx_bitclk);

	if (icodec->data->voltage_on)
		icodec->data->voltage_on();

	/* Configure ADIE */
	trc = adie_codec_open(icodec->data->profile, &icodec->adie_path);
	if (IS_ERR_VALUE(trc))
		pr_err("%s: adie codec open failed\n", __func__);
	else
		adie_codec_setpath(icodec->adie_path,
				icodec->sample_rate, 256);
	/* OSR default to 256, can be changed for power optimization
	 * If OSR is to be changed, need clock API for setting the divider
	 */
	switch (icodec->data->channel_mode) {
	case 2:
		afe_channel_mode = MSM_AFE_STEREO;
		break;
	case 1:
	default:
		afe_channel_mode = MSM_AFE_MONO;
		break;
	}
	afe_config.mi2s.channel = afe_channel_mode;
	afe_config.mi2s.bitwidth = 16;
	afe_config.mi2s.line = 1;
	afe_config.mi2s.format = MSM_AFE_I2S_FORMAT_LPCM;
	if (msm_codec_i2s_slave_mode)
		afe_config.mi2s.ws = 0;
	else
		afe_config.mi2s.ws = 1;

	trc = afe_open(icodec->data->copp_id, &afe_config, icodec->sample_rate);
	if (trc < 0)
		pr_err("%s: afe open failed, trc = %d\n", __func__, trc);

	/* Enable ADIE */
	if (icodec->adie_path) {
		adie_codec_proceed_stage(icodec->adie_path,
				ADIE_CODEC_DIGITAL_READY);
		adie_codec_proceed_stage(icodec->adie_path,
				ADIE_CODEC_DIGITAL_ANALOG_READY);
	}
	if (msm_codec_i2s_slave_mode)
		adie_codec_set_master_mode(icodec->adie_path, 1);
	else
		adie_codec_set_master_mode(icodec->adie_path, 0);

	/* Enable power amplifier */
	if (icodec->data->pamp_on) {
		if (icodec->data->pamp_on()) {
			pr_err("%s: Error turning on rx power\n", __func__);
			goto error_pamp;
		}
	}

	icodec->enabled = 1;

	pm_qos_update_request(&drv->rx_pm_qos_req, PM_QOS_DEFAULT_VALUE);
	return 0;

error_pamp:
	/* Fix: the bit clock was already running by the time the PA
	 * failed; previously it was leaked here. */
	clk_disable_unprepare(drv->rx_bitclk);
error_adie:
	clk_disable_unprepare(drv->rx_osrclk);
error_invalid_freq:
	/* Fix: release the MCLK GPIO claimed at the top so a later
	 * open can re-request it. */
	msm_snddev_rx_mclk_free();
	pr_err("%s: encounter error\n", __func__);

	pm_qos_update_request(&drv->rx_pm_qos_req, PM_QOS_DEFAULT_VALUE);
	return -ENODEV;
}
/*
 * snddev_icodec_open_tx() - bring up the internal codec TX (capture)
 * path; mirrors snddev_icodec_open_rx(): vote supply at high power,
 * run platform setup via pamp_on, claim the TX MCLK GPIO, configure
 * and enable the OSR/bit clocks, program the ADIE path, open the AFE
 * port and step the ADIE to analog-ready.
 *
 * Bug fixes versus the previous version: removed a stray double
 * semicolon; the bit-clock clk_set_rate() result is now checked (the
 * RX path already did); afe_open() failures are logged; the MCLK GPIO
 * is freed on error paths.
 *
 * Returns 0 on success, -ENODEV on failure.
 */
static int snddev_icodec_open_tx(struct snddev_icodec_state *icodec)
{
	int trc;
	int afe_channel_mode;
	union afe_port_config afe_config;
	struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;

	/* Keep the CPU out of deep idle while clocks are sequenced. */
	pm_qos_update_request(&drv->tx_pm_qos_req,
			msm_cpuidle_get_deep_idle_latency());

	if (drv->snddev_vreg)
		vreg_mode_vote(drv->snddev_vreg, 1, SNDDEV_HIGH_POWER_MODE);

	/* Reuse pamp_on for TX platform-specific setup */
	if (icodec->data->pamp_on) {
		if (icodec->data->pamp_on()) {
			pr_err("%s: Error turning on tx power\n", __func__);
			goto error_pamp;
		}
	}

	msm_snddev_tx_mclk_request();

	drv->tx_osrclk = clk_get_sys(NULL, "i2s_mic_osr_clk");
	if (IS_ERR(drv->tx_osrclk))
		pr_err("%s master clock Error\n", __func__);

	trc = clk_set_rate(drv->tx_osrclk,
			SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate));
	if (IS_ERR_VALUE(trc)) {
		pr_err("ERROR setting m clock1\n");
		goto error_invalid_freq;
	}
	clk_prepare_enable(drv->tx_osrclk);

	drv->tx_bitclk = clk_get_sys(NULL, "i2s_mic_bit_clk");
	if (IS_ERR(drv->tx_bitclk))
		pr_err("%s clock Error\n", __func__);

	/* Master clock = Sample Rate * OSR rate bit clock
	 * OSR Rate bit clock = bit/sample * channel master
	 * clock / bit clock = divider value = 8
	 */
	if (msm_codec_i2s_slave_mode) {
		pr_info("%s: configuring bit clock for slave mode\n",
				__func__);
		trc = clk_set_rate(drv->tx_bitclk, 0);
	} else
		trc = clk_set_rate(drv->tx_bitclk, 8);
	/* Fix: this result was previously ignored; fail like the RX
	 * path does, unwinding the OSR clock. */
	if (IS_ERR_VALUE(trc)) {
		pr_err("ERROR setting m clock1\n");
		goto error_adie;
	}
	clk_prepare_enable(drv->tx_bitclk);

	/* Enable ADIE */
	trc = adie_codec_open(icodec->data->profile, &icodec->adie_path);
	if (IS_ERR_VALUE(trc))
		pr_err("%s: adie codec open failed\n", __func__);
	else
		adie_codec_setpath(icodec->adie_path,
				icodec->sample_rate, 256);

	switch (icodec->data->channel_mode) {
	case 2:
		afe_channel_mode = MSM_AFE_STEREO;
		break;
	case 1:
	default:
		afe_channel_mode = MSM_AFE_MONO;
		break;
	}
	afe_config.mi2s.channel = afe_channel_mode;
	afe_config.mi2s.bitwidth = 16;
	afe_config.mi2s.line = 1;
	afe_config.mi2s.format = MSM_AFE_I2S_FORMAT_LPCM;
	if (msm_codec_i2s_slave_mode)
		afe_config.mi2s.ws = 0;
	else
		afe_config.mi2s.ws = 1;

	trc = afe_open(icodec->data->copp_id, &afe_config, icodec->sample_rate);
	/* Fix: log AFE failures as the RX path does (was discarded). */
	if (trc < 0)
		pr_err("%s: afe open failed, trc = %d\n", __func__, trc);

	if (icodec->adie_path) {
		adie_codec_proceed_stage(icodec->adie_path,
				ADIE_CODEC_DIGITAL_READY);
		adie_codec_proceed_stage(icodec->adie_path,
				ADIE_CODEC_DIGITAL_ANALOG_READY);
	}
	if (msm_codec_i2s_slave_mode)
		adie_codec_set_master_mode(icodec->adie_path, 1);
	else
		adie_codec_set_master_mode(icodec->adie_path, 0);

	icodec->enabled = 1;

	pm_qos_update_request(&drv->tx_pm_qos_req, PM_QOS_DEFAULT_VALUE);
	return 0;

error_adie:
	clk_disable_unprepare(drv->tx_osrclk);
error_invalid_freq:
	/* Fix: release the MCLK GPIO claimed above. */
	msm_snddev_tx_mclk_free();
	if (icodec->data->pamp_off)
		icodec->data->pamp_off();

	pr_err("%s: encounter error\n", __func__);
error_pamp:
	pm_qos_update_request(&drv->tx_pm_qos_req, PM_QOS_DEFAULT_VALUE);
	return -ENODEV;
}
/*
 * snddev_icodec_close_lb() - tear down the loopback path: PA off,
 * drop the low-power supply vote, close the ADIE path, then cut the
 * codec voltage. Always returns 0.
 */
static int snddev_icodec_close_lb(struct snddev_icodec_state *icodec)
{
struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
/* Disable power amplifier */
if (icodec->data->pamp_off)
icodec->data->pamp_off();
if (drv->snddev_vreg)
vreg_mode_vote(drv->snddev_vreg, 0, SNDDEV_LOW_POWER_MODE);
if (icodec->adie_path) {
adie_codec_proceed_stage(icodec->adie_path,
ADIE_CODEC_DIGITAL_OFF);
adie_codec_close(icodec->adie_path);
icodec->adie_path = NULL;
}
if (icodec->data->voltage_off)
icodec->data->voltage_off();
return 0;
}
/*
 * snddev_icodec_close_rx() - tear down the RX path in reverse order of
 * open: drop the supply vote, PA off, ADIE off/closed, AFE port
 * closed, codec voltage off, clocks disabled, MCLK GPIO released.
 * Always returns 0.
 */
static int snddev_icodec_close_rx(struct snddev_icodec_state *icodec)
{
struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
/* Hold off deep idle while the path is shut down. */
pm_qos_update_request(&drv->rx_pm_qos_req,
msm_cpuidle_get_deep_idle_latency());
if (drv->snddev_vreg)
vreg_mode_vote(drv->snddev_vreg, 0, SNDDEV_HIGH_POWER_MODE);
/* Disable power amplifier */
if (icodec->data->pamp_off)
icodec->data->pamp_off();
/* Disable ADIE */
if (icodec->adie_path) {
adie_codec_proceed_stage(icodec->adie_path,
ADIE_CODEC_DIGITAL_OFF);
adie_codec_close(icodec->adie_path);
icodec->adie_path = NULL;
}
afe_close(icodec->data->copp_id);
if (icodec->data->voltage_off)
icodec->data->voltage_off();
clk_disable_unprepare(drv->rx_bitclk);
clk_disable_unprepare(drv->rx_osrclk);
msm_snddev_rx_mclk_free();
icodec->enabled = 0;
pm_qos_update_request(&drv->rx_pm_qos_req, PM_QOS_DEFAULT_VALUE);
return 0;
}
/*
 * snddev_icodec_close_tx() - tear down the TX path: drop the supply
 * vote, ADIE off/closed, AFE port closed, clocks disabled, MCLK GPIO
 * released, then platform teardown via pamp_off. Always returns 0.
 */
static int snddev_icodec_close_tx(struct snddev_icodec_state *icodec)
{
struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
/* Hold off deep idle while the path is shut down. */
pm_qos_update_request(&drv->tx_pm_qos_req,
msm_cpuidle_get_deep_idle_latency());
if (drv->snddev_vreg)
vreg_mode_vote(drv->snddev_vreg, 0, SNDDEV_HIGH_POWER_MODE);
/* Disable ADIE */
if (icodec->adie_path) {
adie_codec_proceed_stage(icodec->adie_path,
ADIE_CODEC_DIGITAL_OFF);
adie_codec_close(icodec->adie_path);
icodec->adie_path = NULL;
}
afe_close(icodec->data->copp_id);
clk_disable_unprepare(drv->tx_bitclk);
clk_disable_unprepare(drv->tx_osrclk);
msm_snddev_tx_mclk_free();
/* Reuse pamp_off for TX platform-specific setup */
if (icodec->data->pamp_off)
icodec->data->pamp_off();
icodec->enabled = 0;
pm_qos_update_request(&drv->tx_pm_qos_req, PM_QOS_DEFAULT_VALUE);
return 0;
}
/*
 * snddev_icodec_set_device_volume_impl() - apply @volume (percent) to
 * the device's ADIE path, using digital or analog gain according to
 * the device's dev_vol_type flags.
 *
 * Caller must hold the rx/lb/tx lock matching the device direction.
 * Returns 0 on success, a negative adie error, or -EPERM when the
 * device advertises no volume control.
 *
 * Bug fix: both failure messages were missing the space before %s
 * ("...for%s volume...").
 */
static int snddev_icodec_set_device_volume_impl(
		struct msm_snddev_info *dev_info, u32 volume)
{
	struct snddev_icodec_state *icodec;
	int rc = 0;

	icodec = dev_info->private_data;

	if (icodec->data->dev_vol_type & SNDDEV_DEV_VOL_DIGITAL) {
		rc = adie_codec_set_device_digital_volume(icodec->adie_path,
				icodec->data->channel_mode, volume);
		if (rc < 0) {
			pr_err("%s: unable to set_device_digital_volume for"
				" %s volume in percentage = %u\n",
				__func__, dev_info->name, volume);
			return rc;
		}
	} else if (icodec->data->dev_vol_type & SNDDEV_DEV_VOL_ANALOG) {
		rc = adie_codec_set_device_analog_volume(icodec->adie_path,
				icodec->data->channel_mode, volume);
		if (rc < 0) {
			pr_err("%s: unable to set_device_analog_volume for"
				" %s volume in percentage = %u\n",
				__func__, dev_info->name, volume);
			return rc;
		}
	} else {
		pr_err("%s: Invalid device volume control\n", __func__);
		return -EPERM;
	}
	return rc;
}
/*
 * snddev_icodec_open() - msm_snddev open hook.
 *
 * Dispatches on device capability: RX and TX each allow only one
 * active device at a time (rx_active/tx_active guarded by the matching
 * mutex; -EBUSY when already taken), loopback (LB) has no such
 * exclusivity. After a path comes up, the cached device volume is
 * applied when the device has digital or analog volume control.
 */
static int snddev_icodec_open(struct msm_snddev_info *dev_info)
{
int rc = 0;
struct snddev_icodec_state *icodec;
struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
if (!dev_info) {
rc = -EINVAL;
goto error;
}
icodec = dev_info->private_data;
if (icodec->data->capability & SNDDEV_CAP_RX) {
mutex_lock(&drv->rx_lock);
if (drv->rx_active) {
mutex_unlock(&drv->rx_lock);
pr_err("%s: rx_active is set, return EBUSY\n",
__func__);
rc = -EBUSY;
goto error;
}
rc = snddev_icodec_open_rx(icodec);
if (!IS_ERR_VALUE(rc)) {
if ((icodec->data->dev_vol_type & (
SNDDEV_DEV_VOL_DIGITAL |
SNDDEV_DEV_VOL_ANALOG)))
rc = snddev_icodec_set_device_volume_impl(
dev_info, dev_info->dev_volume);
/* Mark active only if volume application succeeded too. */
if (!IS_ERR_VALUE(rc))
drv->rx_active = 1;
else
pr_err("%s: set_device_volume_impl"
" error(rx) = %d\n", __func__, rc);
}
mutex_unlock(&drv->rx_lock);
} else if (icodec->data->capability & SNDDEV_CAP_LB) {
mutex_lock(&drv->lb_lock);
rc = snddev_icodec_open_lb(icodec);
if (!IS_ERR_VALUE(rc)) {
if ((icodec->data->dev_vol_type & (
SNDDEV_DEV_VOL_DIGITAL |
SNDDEV_DEV_VOL_ANALOG)))
rc = snddev_icodec_set_device_volume_impl(
dev_info, dev_info->dev_volume);
}
mutex_unlock(&drv->lb_lock);
} else {
mutex_lock(&drv->tx_lock);
if (drv->tx_active) {
mutex_unlock(&drv->tx_lock);
pr_err("%s: tx_active is set, return EBUSY\n",
__func__);
rc = -EBUSY;
goto error;
}
rc = snddev_icodec_open_tx(icodec);
if (!IS_ERR_VALUE(rc)) {
if ((icodec->data->dev_vol_type & (
SNDDEV_DEV_VOL_DIGITAL |
SNDDEV_DEV_VOL_ANALOG)))
rc = snddev_icodec_set_device_volume_impl(
dev_info, dev_info->dev_volume);
if (!IS_ERR_VALUE(rc))
drv->tx_active = 1;
else
pr_err("%s: set_device_volume_impl"
" error(tx) = %d\n", __func__, rc);
}
mutex_unlock(&drv->tx_lock);
}
error:
return rc;
}
/*
 * snddev_icodec_close() - msm_snddev close hook; mirror of open.
 * Refuses (-EPERM) to close an RX/TX path that is not marked active
 * and clears the active flag on success. LB closes unconditionally.
 */
static int snddev_icodec_close(struct msm_snddev_info *dev_info)
{
int rc = 0;
struct snddev_icodec_state *icodec;
struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
if (!dev_info) {
rc = -EINVAL;
goto error;
}
icodec = dev_info->private_data;
if (icodec->data->capability & SNDDEV_CAP_RX) {
mutex_lock(&drv->rx_lock);
if (!drv->rx_active) {
mutex_unlock(&drv->rx_lock);
pr_err("%s: rx_active not set, return\n", __func__);
rc = -EPERM;
goto error;
}
rc = snddev_icodec_close_rx(icodec);
if (!IS_ERR_VALUE(rc))
drv->rx_active = 0;
else
pr_err("%s: close rx failed, rc = %d\n", __func__, rc);
mutex_unlock(&drv->rx_lock);
} else if (icodec->data->capability & SNDDEV_CAP_LB) {
mutex_lock(&drv->lb_lock);
rc = snddev_icodec_close_lb(icodec);
mutex_unlock(&drv->lb_lock);
} else {
mutex_lock(&drv->tx_lock);
if (!drv->tx_active) {
mutex_unlock(&drv->tx_lock);
pr_err("%s: tx_active not set, return\n", __func__);
rc = -EPERM;
goto error;
}
rc = snddev_icodec_close_tx(icodec);
if (!IS_ERR_VALUE(rc))
drv->tx_active = 0;
else
pr_err("%s: close tx failed, rc = %d\n", __func__, rc);
mutex_unlock(&drv->tx_lock);
}
error:
return rc;
}
/*
 * snddev_icodec_check_freq() - validate a requested sample rate.
 *
 * Returns 0 when @req_freq is one of the discrete rates the internal
 * codec supports (8 kHz .. 48 kHz set below), -EINVAL otherwise.
 * A rate inside the overall range but not in the supported set is
 * logged; out-of-range rates are rejected silently.
 */
static int snddev_icodec_check_freq(u32 req_freq)
{
	if (req_freq < 8000 || req_freq > 48000)
		return -EINVAL;

	switch (req_freq) {
	case 8000:
	case 11025:
	case 12000:
	case 16000:
	case 22050:
	case 24000:
	case 32000:
	case 44100:
	case 48000:
		return 0;
	default:
		pr_info("%s: Unsupported Frequency:%d\n", __func__,
				req_freq);
		return -EINVAL;
	}
}
/*
 * snddev_icodec_set_freq() - change the device sample rate.
 *
 * Validates @rate against both the ADIE profile and the supported-rate
 * table; on success stores it and, if the device is currently enabled,
 * bounces the device (close + reopen) so the new rate takes effect.
 * Returns the new sample rate on success, -EINVAL on rejection.
 * NOTE(review): the close/open return values are ignored here.
 */
static int snddev_icodec_set_freq(struct msm_snddev_info *dev_info, u32 rate)
{
int rc;
struct snddev_icodec_state *icodec;
if (!dev_info) {
rc = -EINVAL;
goto error;
}
icodec = dev_info->private_data;
if (adie_codec_freq_supported(icodec->data->profile, rate) != 0) {
pr_err("%s: adie_codec_freq_supported() failed\n", __func__);
rc = -EINVAL;
goto error;
} else {
if (snddev_icodec_check_freq(rate) != 0) {
pr_err("%s: check_freq failed\n", __func__);
rc = -EINVAL;
goto error;
} else
icodec->sample_rate = rate;
}
if (icodec->enabled) {
snddev_icodec_close(dev_info);
snddev_icodec_open(dev_info);
}
return icodec->sample_rate;
error:
return rc;
}
/*
 * snddev_icodec_enable_sidetone() - route TX audio back into the RX
 * path through the AFE sidetone with @gain. RX devices only, and only
 * while the RX path is active and the device is open.
 * Returns 0, an afe error, -EPERM (inactive) or -EINVAL.
 */
static int snddev_icodec_enable_sidetone(struct msm_snddev_info *dev_info,
u32 enable, uint16_t gain)
{
int rc = 0;
struct snddev_icodec_state *icodec;
struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
if (!dev_info) {
pr_err("invalid dev_info\n");
rc = -EINVAL;
goto error;
}
icodec = dev_info->private_data;
if (icodec->data->capability & SNDDEV_CAP_RX) {
mutex_lock(&drv->rx_lock);
if (!drv->rx_active || !dev_info->opened) {
pr_err("dev not active\n");
rc = -EPERM;
mutex_unlock(&drv->rx_lock);
goto error;
}
rc = afe_sidetone(PRIMARY_I2S_TX, PRIMARY_I2S_RX, enable, gain);
if (rc < 0)
pr_err("%s: AFE command sidetone failed\n", __func__);
mutex_unlock(&drv->rx_lock);
} else {
rc = -EINVAL;
pr_err("rx device only\n");
}
error:
return rc;
}
/*
 * snddev_icodec_enable_anc() - toggle active noise cancellation on an
 * open, active device that advertises both RX and ANC capability.
 * Enabling pulls the ANC register-write list from ACDB calibration
 * data and hands it to the ADIE; disabling passes NULL.
 * NOTE(review): the "no calibration data" path returns a bare -1
 * rather than a named errno.
 */
static int snddev_icodec_enable_anc(struct msm_snddev_info *dev_info,
u32 enable)
{
int rc = 0;
struct adie_codec_anc_data *reg_writes;
struct acdb_cal_block cal_block;
struct snddev_icodec_state *icodec;
struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
pr_info("%s: enable=%d\n", __func__, enable);
if (!dev_info) {
pr_err("invalid dev_info\n");
rc = -EINVAL;
goto error;
}
icodec = dev_info->private_data;
if ((icodec->data->capability & SNDDEV_CAP_RX) &&
(icodec->data->capability & SNDDEV_CAP_ANC)) {
mutex_lock(&drv->rx_lock);
if (!drv->rx_active || !dev_info->opened) {
pr_err("dev not active\n");
rc = -EPERM;
mutex_unlock(&drv->rx_lock);
goto error;
}
if (enable) {
get_anc_cal(&cal_block);
reg_writes = (struct adie_codec_anc_data *)
cal_block.cal_kvaddr;
if (reg_writes == NULL) {
pr_err("error, no calibration data\n");
rc = -1;
mutex_unlock(&drv->rx_lock);
goto error;
}
rc = adie_codec_enable_anc(icodec->adie_path,
1, reg_writes);
} else {
rc = adie_codec_enable_anc(icodec->adie_path,
0, NULL);
}
mutex_unlock(&drv->rx_lock);
} else {
rc = -EINVAL;
pr_err("rx and ANC device only\n");
}
error:
return rc;
}
/*
 * snddev_icodec_set_device_volume() - external entry for a volume
 * change. Caches @volume in dev_info, then applies it under the lock
 * matching the device direction (rx/lb/tx).
 * Returns 0 on success, -EINVAL for a NULL device, or -EPERM when the
 * device has no digital/analog volume control.
 */
int snddev_icodec_set_device_volume(struct msm_snddev_info *dev_info,
u32 volume)
{
struct snddev_icodec_state *icodec;
struct mutex *lock;
struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
int rc = -EPERM;
if (!dev_info) {
pr_info("%s : device not intilized.\n", __func__);
return -EINVAL;
}
icodec = dev_info->private_data;
if (!(icodec->data->dev_vol_type & (SNDDEV_DEV_VOL_DIGITAL
| SNDDEV_DEV_VOL_ANALOG))) {
pr_info("%s : device %s does not support device volume "
"control.", __func__, dev_info->name);
return -EPERM;
}
dev_info->dev_volume = volume;
/* Pick the lock for this device's direction. */
if (icodec->data->capability & SNDDEV_CAP_RX)
lock = &drv->rx_lock;
else if (icodec->data->capability & SNDDEV_CAP_LB)
lock = &drv->lb_lock;
else
lock = &drv->tx_lock;
mutex_lock(lock);
rc = snddev_icodec_set_device_volume_impl(dev_info,
dev_info->dev_volume);
mutex_unlock(lock);
return rc;
}
static int snddev_icodec_probe(struct platform_device *pdev)
{
int rc = 0;
struct snddev_icodec_data *pdata;
struct msm_snddev_info *dev_info;
struct snddev_icodec_state *icodec;
if (!pdev || !pdev->dev.platform_data) {
printk(KERN_ALERT "Invalid caller\n");
rc = -1;
goto error;
}
pdata = pdev->dev.platform_data;
if ((pdata->capability & SNDDEV_CAP_RX) &&
(pdata->capability & SNDDEV_CAP_TX)) {
pr_err("%s: invalid device data either RX or TX\n", __func__);
goto error;
}
icodec = kzalloc(sizeof(struct snddev_icodec_state), GFP_KERNEL);
if (!icodec) {
rc = -ENOMEM;
goto error;
}
dev_info = kmalloc(sizeof(struct msm_snddev_info), GFP_KERNEL);
if (!dev_info) {
kfree(icodec);
rc = -ENOMEM;
goto error;
}
dev_info->name = pdata->name;
dev_info->copp_id = pdata->copp_id;
dev_info->private_data = (void *) icodec;
dev_info->dev_ops.open = snddev_icodec_open;
dev_info->dev_ops.close = snddev_icodec_close;
dev_info->dev_ops.set_freq = snddev_icodec_set_freq;
dev_info->dev_ops.set_device_volume = snddev_icodec_set_device_volume;
dev_info->capability = pdata->capability;
dev_info->opened = 0;
msm_snddev_register(dev_info);
icodec->data = pdata;
icodec->sample_rate = pdata->default_sample_rate;
dev_info->sample_rate = pdata->default_sample_rate;
dev_info->channel_mode = pdata->channel_mode;
if (pdata->capability & SNDDEV_CAP_RX)
dev_info->dev_ops.enable_sidetone =
snddev_icodec_enable_sidetone;
else
dev_info->dev_ops.enable_sidetone = NULL;
if (pdata->capability & SNDDEV_CAP_ANC) {
dev_info->dev_ops.enable_anc =
snddev_icodec_enable_anc;
} else {
dev_info->dev_ops.enable_anc = NULL;
}
error:
return rc;
}
/* Nothing to undo here: probe registers the device with the snddev
 * core and no deregistration call is used by this driver. */
static int snddev_icodec_remove(struct platform_device *pdev)
{
return 0;
}
static struct platform_driver snddev_icodec_driver = {
.probe = snddev_icodec_probe,
.remove = snddev_icodec_remove,
.driver = { .name = "snddev_icodec" }
};
/* Module parameter: run MSM as I2S slave (bit clocks set to rate 0). */
module_param(msm_codec_i2s_slave_mode, bool, 0);
MODULE_PARM_DESC(msm_codec_i2s_slave_mode, "Set MSM to I2S slave clock mode");
/*
 * snddev_icodec_init() - register the three platform drivers, then set
 * up the locks, PM QoS requests and the shared codec supply.
 * NOTE(review): on failure this returns -ENODEV rather than the
 * underlying error code held in rc.
 */
static int __init snddev_icodec_init(void)
{
s32 rc;
struct snddev_icodec_drv_state *icodec_drv = &snddev_icodec_drv;
rc = platform_driver_register(&snddev_icodec_driver);
if (IS_ERR_VALUE(rc)) {
pr_err("%s: platform_driver_register for snddev icodec failed\n",
__func__);
goto error_snddev_icodec_driver;
}
rc = platform_driver_register(&msm_cdcclk_ctl_driver);
if (IS_ERR_VALUE(rc)) {
pr_err("%s: platform_driver_register for msm snddev failed\n",
__func__);
goto error_msm_cdcclk_ctl_driver;
}
rc = platform_driver_register(&msm_icodec_gpio_driver);
if (IS_ERR_VALUE(rc)) {
pr_err("%s: platform_driver_register for msm snddev gpio failed\n",
__func__);
goto error_msm_icodec_gpio_driver;
}
mutex_init(&icodec_drv->rx_lock);
mutex_init(&icodec_drv->lb_lock);
mutex_init(&icodec_drv->tx_lock);
icodec_drv->rx_active = 0;
icodec_drv->tx_active = 0;
/* May be NULL on failure; all users check before voting. */
icodec_drv->snddev_vreg = vreg_init();
pm_qos_add_request(&icodec_drv->tx_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
pm_qos_add_request(&icodec_drv->rx_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
return 0;
error_msm_icodec_gpio_driver:
platform_driver_unregister(&msm_cdcclk_ctl_driver);
error_msm_cdcclk_ctl_driver:
platform_driver_unregister(&snddev_icodec_driver);
error_snddev_icodec_driver:
return -ENODEV;
}
/*
 * snddev_icodec_exit() - unregister all platform drivers and release
 * the clock and regulator handles taken during init/open.
 */
static void __exit snddev_icodec_exit(void)
{
struct snddev_icodec_drv_state *icodec_drv = &snddev_icodec_drv;
platform_driver_unregister(&snddev_icodec_driver);
platform_driver_unregister(&msm_cdcclk_ctl_driver);
platform_driver_unregister(&msm_icodec_gpio_driver);
clk_put(icodec_drv->rx_osrclk);
clk_put(icodec_drv->tx_osrclk);
if (icodec_drv->snddev_vreg) {
vreg_deinit(icodec_drv->snddev_vreg);
icodec_drv->snddev_vreg = NULL;
}
return;
}
module_init(snddev_icodec_init);
module_exit(snddev_icodec_exit);
MODULE_DESCRIPTION("ICodec Sound Device driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
byzvulture/android_kernel_nubia_nx507j | sound/soc/codecs/max98095.c | 3651 | 64995 | /*
* max98095.c -- MAX98095 ALSA SoC Audio driver
*
* Copyright 2011 Maxim Integrated Products
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include <sound/max98095.h>
#include "max98095.h"
/* Device variants handled by this driver. */
enum max98095_type {
MAX98095,
};
/* Per-DAI cached configuration. */
struct max98095_cdata {
unsigned int rate;
unsigned int fmt;
int eq_sel; /* presumably the selected EQ preset index - confirm against controls */
int bq_sel; /* presumably the selected biquad preset index - confirm against controls */
};
/* Driver private state. */
struct max98095_priv {
enum max98095_type devtype;
struct max98095_pdata *pdata;
unsigned int sysclk;
struct max98095_cdata dai[3]; /* one entry per DAI */
const char **eq_texts; /* text list backing eq_enum */
const char **bq_texts; /* text list backing bq_enum */
struct soc_enum eq_enum;
struct soc_enum bq_enum;
int eq_textcnt; /* number of entries in eq_texts */
int bq_textcnt; /* number of entries in bq_texts */
u8 lin_state;
unsigned int mic1pre; /* NOTE(review): looks like mic pre-amp settings - confirm */
unsigned int mic2pre;
};
/*
 * Power-on default values for the register cache.  Nearly every
 * register defaults to 0x00, so only the non-zero defaults are listed;
 * C guarantees all elements omitted from a designated initializer are
 * zero-filled, which makes this exactly equivalent to the full table.
 */
static const u8 max98095_reg_def[M98095_REG_CNT] = {
	[0x92] = 0x30,
	[0x93] = 0xF0,
	[0x96] = 0x3F,
};
/*
 * Per-register access masks: "readable" selects the bits that may be
 * read, "writable" the bits that may be written.  Registers omitted
 * from the initializer list (register 0x00 and the reserved 0x98-0xFE
 * range) default to { 0x00, 0x00 }: neither readable nor writable.
 */
static struct {
	int readable;
	int writable;
} max98095_access[M98095_REG_CNT] = {
	/* 0x01-0x0E: read-only status registers */
	[0x01] = { 0xFF, 0x00 }, [0x02] = { 0xFF, 0x00 },
	[0x03] = { 0xFF, 0x00 }, [0x04] = { 0xFF, 0x00 },
	[0x05] = { 0xFF, 0x00 }, [0x06] = { 0xFF, 0x00 },
	[0x07] = { 0xFF, 0x00 }, [0x08] = { 0xFF, 0x00 },
	[0x09] = { 0xFF, 0x00 }, [0x0A] = { 0xFF, 0x00 },
	[0x0B] = { 0xFF, 0x00 }, [0x0C] = { 0xFF, 0x00 },
	[0x0D] = { 0xFF, 0x00 }, [0x0E] = { 0xFF, 0x00 },
	[0x0F] = { 0xFF, 0x9F },
	[0x10] = { 0xFF, 0xFF }, [0x11] = { 0xFF, 0xFF },
	[0x12] = { 0xFF, 0xFF }, [0x13] = { 0xFF, 0xFF },
	[0x14] = { 0xFF, 0xFF }, [0x15] = { 0xFF, 0xFF },
	[0x16] = { 0xFF, 0xFF }, [0x17] = { 0xFF, 0xFF },
	[0x18] = { 0xFF, 0xFF }, [0x19] = { 0xFF, 0xFF },
	[0x1A] = { 0xFF, 0xFF }, [0x1B] = { 0xFF, 0xFF },
	[0x1C] = { 0xFF, 0xFF }, [0x1D] = { 0xFF, 0xFF },
	[0x1E] = { 0xFF, 0x77 }, [0x1F] = { 0xFF, 0x77 },
	[0x20] = { 0xFF, 0x77 }, [0x21] = { 0xFF, 0x77 },
	[0x22] = { 0xFF, 0x77 }, [0x23] = { 0xFF, 0x77 },
	[0x24] = { 0xFF, 0xFF }, [0x25] = { 0xFF, 0x7F },
	[0x26] = { 0xFF, 0x31 }, [0x27] = { 0xFF, 0xFF },
	[0x28] = { 0xFF, 0xFF }, [0x29] = { 0xFF, 0xFF },
	[0x2A] = { 0xFF, 0xF7 }, [0x2B] = { 0xFF, 0x2F },
	[0x2C] = { 0xFF, 0xEF }, [0x2D] = { 0xFF, 0xFF },
	[0x2E] = { 0xFF, 0xFF }, [0x2F] = { 0xFF, 0xFF },
	[0x30] = { 0xFF, 0xFF }, [0x31] = { 0xFF, 0xFF },
	[0x32] = { 0xFF, 0xFF }, [0x33] = { 0xFF, 0xFF },
	[0x34] = { 0xFF, 0xF7 }, [0x35] = { 0xFF, 0x2F },
	[0x36] = { 0xFF, 0xCF }, [0x37] = { 0xFF, 0xFF },
	[0x38] = { 0xFF, 0xFF }, [0x39] = { 0xFF, 0xFF },
	[0x3A] = { 0xFF, 0xFF }, [0x3B] = { 0xFF, 0xFF },
	[0x3C] = { 0xFF, 0xFF }, [0x3D] = { 0xFF, 0xFF },
	[0x3E] = { 0xFF, 0xF7 }, [0x3F] = { 0xFF, 0x2F },
	[0x40] = { 0xFF, 0xCF }, [0x41] = { 0xFF, 0xFF },
	[0x42] = { 0xFF, 0x77 }, [0x43] = { 0xFF, 0xFF },
	[0x44] = { 0xFF, 0xFF }, [0x45] = { 0xFF, 0xFF },
	[0x46] = { 0xFF, 0xFF }, [0x47] = { 0xFF, 0xFF },
	[0x48] = { 0xFF, 0xFF }, [0x49] = { 0xFF, 0x0F },
	[0x4A] = { 0xFF, 0xFF }, [0x4B] = { 0xFF, 0xFF },
	[0x4C] = { 0xFF, 0x3F }, [0x4D] = { 0xFF, 0x3F },
	[0x4E] = { 0xFF, 0x3F }, [0x4F] = { 0xFF, 0xFF },
	[0x50] = { 0xFF, 0x7F }, [0x51] = { 0xFF, 0x7F },
	[0x52] = { 0xFF, 0x0F }, [0x53] = { 0xFF, 0x3F },
	[0x54] = { 0xFF, 0x3F }, [0x55] = { 0xFF, 0x3F },
	[0x56] = { 0xFF, 0xFF }, [0x57] = { 0xFF, 0xFF },
	[0x58] = { 0xFF, 0xBF }, [0x59] = { 0xFF, 0x1F },
	[0x5A] = { 0xFF, 0xBF }, [0x5B] = { 0xFF, 0x1F },
	[0x5C] = { 0xFF, 0xBF }, [0x5D] = { 0xFF, 0x3F },
	[0x5E] = { 0xFF, 0x3F }, [0x5F] = { 0xFF, 0x7F },
	[0x60] = { 0xFF, 0x7F }, [0x61] = { 0xFF, 0x47 },
	[0x62] = { 0xFF, 0x9F }, [0x63] = { 0xFF, 0x9F },
	[0x64] = { 0xFF, 0x9F }, [0x65] = { 0xFF, 0x9F },
	[0x66] = { 0xFF, 0x9F }, [0x67] = { 0xFF, 0xBF },
	[0x68] = { 0xFF, 0xBF }, [0x69] = { 0xFF, 0xFF },
	[0x6A] = { 0xFF, 0xFF }, [0x6B] = { 0xFF, 0x7F },
	[0x6C] = { 0xFF, 0xF7 }, [0x6D] = { 0xFF, 0xFF },
	[0x6E] = { 0xFF, 0xFF }, [0x6F] = { 0xFF, 0x1F },
	[0x70] = { 0xFF, 0xF7 }, [0x71] = { 0xFF, 0xFF },
	[0x72] = { 0xFF, 0xFF }, [0x73] = { 0xFF, 0x1F },
	[0x74] = { 0xFF, 0xF7 }, [0x75] = { 0xFF, 0xFF },
	[0x76] = { 0xFF, 0xFF }, [0x77] = { 0xFF, 0x1F },
	[0x78] = { 0xFF, 0xF7 }, [0x79] = { 0xFF, 0xFF },
	[0x7A] = { 0xFF, 0xFF }, [0x7B] = { 0xFF, 0x1F },
	[0x7C] = { 0xFF, 0xF7 }, [0x7D] = { 0xFF, 0xFF },
	[0x7E] = { 0xFF, 0xFF }, [0x7F] = { 0xFF, 0x1F },
	[0x80] = { 0xFF, 0xF7 }, [0x81] = { 0xFF, 0xFF },
	[0x82] = { 0xFF, 0xFF }, [0x83] = { 0xFF, 0x1F },
	[0x84] = { 0xFF, 0x7F }, [0x85] = { 0xFF, 0x0F },
	[0x86] = { 0xFF, 0xD8 }, [0x87] = { 0xFF, 0xFF },
	[0x88] = { 0xFF, 0xEF }, [0x89] = { 0xFF, 0xFE },
	[0x8A] = { 0xFF, 0xFE }, [0x8B] = { 0xFF, 0xFF },
	[0x8C] = { 0xFF, 0xFF }, [0x8D] = { 0xFF, 0x3F },
	[0x8E] = { 0xFF, 0xFF }, [0x8F] = { 0xFF, 0x3F },
	[0x90] = { 0xFF, 0x8F }, [0x91] = { 0xFF, 0xFF },
	[0x92] = { 0xFF, 0x3F }, [0x93] = { 0xFF, 0xFF },
	[0x94] = { 0xFF, 0xFF }, [0x95] = { 0xFF, 0x0F },
	[0x96] = { 0xFF, 0x3F }, [0x97] = { 0xFF, 0x8C },
	[0xFF] = { 0xFF, 0x00 },	/* read-only */
};
/*
 * Report whether @reg has any readable bits.  Registers beyond the end
 * of the access map are never readable.
 */
static int max98095_readable(struct snd_soc_codec *codec, unsigned int reg)
{
	return (reg < M98095_REG_CNT) && (max98095_access[reg].readable != 0);
}
/*
 * Report whether @reg must always be read from hardware rather than
 * the register cache: everything above the cached range is volatile,
 * as are the live status registers listed in the switch below.
 */
static int max98095_volatile(struct snd_soc_codec *codec, unsigned int reg)
{
	if (reg > M98095_REG_MAX_CACHED)
		return 1;
	switch (reg) {
	case M98095_000_HOST_DATA:
	case M98095_001_HOST_INT_STS:
	case M98095_002_HOST_RSP_STS:
	case M98095_003_HOST_CMD_STS:
	case M98095_004_CODEC_STS:
	case M98095_005_DAI1_ALC_STS:
	case M98095_006_DAI2_ALC_STS:
	case M98095_007_JACK_AUTO_STS:
	case M98095_008_JACK_MANUAL_STS:
	case M98095_009_JACK_VBAT_STS:
	case M98095_00A_ACC_ADC_STS:
	case M98095_00B_MIC_NG_AGC_STS:
	case M98095_00C_SPK_L_VOLT_STS:
	case M98095_00D_SPK_R_VOLT_STS:
	case M98095_00E_TEMP_SENSOR_STS:
		return 1;
	}
	return 0;
}
/*
* Filter coefficients are in a separate register segment
* and they share the address space of the normal registers.
* The coefficient registers do not need or share the cache.
*/
/*
 * Write @value to @reg with the register cache temporarily bypassed.
 * Used for the coefficient segment, which shares the address space of
 * the normal registers but must never go through the cache.
 *
 * Returns 0 on success, -EIO if the underlying write failed.
 */
static int max98095_hw_write(struct snd_soc_codec *codec, unsigned int reg,
			     unsigned int value)
{
	int err;

	codec->cache_bypass = 1;
	err = snd_soc_write(codec, reg, value);
	codec->cache_bypass = 0;

	if (err)
		return -EIO;
	return 0;
}
/*
* Load equalizer DSP coefficient configurations registers
*/
/*
 * Load one band of equalizer DSP coefficients for the given DAI.
 * @dai selects DAI1 (0) or DAI2 (1); @band is the 0-based band index;
 * @coefs holds M98095_COEFS_PER_BAND 16-bit coefficient words.
 */
static void m98095_eq_band(struct snd_soc_codec *codec, unsigned int dai,
		    unsigned int band, u16 *coefs)
{
	unsigned int reg;
	unsigned int n;

	BUG_ON(band > 4);
	BUG_ON(dai > 1);

	/* Base of the EQ segment for the chosen DAI, plus the band's
	 * byte offset (two byte-wide registers per coefficient word). */
	reg = (dai ? M98095_142_DAI2_EQ_BASE : M98095_110_DAI1_EQ_BASE)
		+ band * (M98095_COEFS_PER_BAND << 1);

	/* Each coefficient is programmed high byte first. */
	for (n = 0; n < M98095_COEFS_PER_BAND; n++) {
		max98095_hw_write(codec, reg++, M98095_BYTE1(coefs[n]));
		max98095_hw_write(codec, reg++, M98095_BYTE0(coefs[n]));
	}
}
/*
* Load biquad filter coefficient configurations registers
*/
/*
 * Load one band of biquad filter coefficients for the given DAI.
 * Mirrors m98095_eq_band() but targets the biquad register segment.
 */
static void m98095_biquad_band(struct snd_soc_codec *codec, unsigned int dai,
		    unsigned int band, u16 *coefs)
{
	unsigned int reg;
	unsigned int n;

	BUG_ON(band > 1);
	BUG_ON(dai > 1);

	/* Base of the biquad segment for the chosen DAI, plus the
	 * band's byte offset (two registers per coefficient word). */
	reg = (dai ? M98095_17E_DAI2_BQ_BASE : M98095_174_DAI1_BQ_BASE)
		+ band * (M98095_COEFS_PER_BAND << 1);

	/* Each coefficient is programmed high byte first. */
	for (n = 0; n < M98095_COEFS_PER_BAND; n++) {
		max98095_hw_write(codec, reg++, M98095_BYTE1(coefs[n]));
		max98095_hw_write(codec, reg++, M98095_BYTE0(coefs[n]));
	}
}
/* DAI filter mode selector (bit 7 of the per-DAI FILTERS register). */
static const char * const max98095_fltr_mode[] = { "Voice", "Music" };
static const struct soc_enum max98095_dai1_filter_mode_enum[] = {
	SOC_ENUM_SINGLE(M98095_02E_DAI1_FILTERS, 7, 2, max98095_fltr_mode),
};
static const struct soc_enum max98095_dai2_filter_mode_enum[] = {
	SOC_ENUM_SINGLE(M98095_038_DAI2_FILTERS, 7, 2, max98095_fltr_mode),
};
/* External microphone routing mux. */
static const char * const max98095_extmic_text[] = { "None", "MIC1", "MIC2" };
static const struct soc_enum max98095_extmic_enum =
	SOC_ENUM_SINGLE(M98095_087_CFG_MIC, 0, 3, max98095_extmic_text);
static const struct snd_kcontrol_new max98095_extmic_mux =
	SOC_DAPM_ENUM("External MIC Mux", max98095_extmic_enum);
/* Line input source mux: input pair A or B. */
static const char * const max98095_linein_text[] = { "INA", "INB" };
static const struct soc_enum max98095_linein_enum =
	SOC_ENUM_SINGLE(M98095_086_CFG_LINE, 6, 2, max98095_linein_text);
static const struct snd_kcontrol_new max98095_linein_mux =
	SOC_DAPM_ENUM("Linein Input Mux", max98095_linein_enum);
/* Line input/output wiring mode: stereo or differential. */
static const char * const max98095_line_mode_text[] = {
	"Stereo", "Differential"};
static const struct soc_enum max98095_linein_mode_enum =
	SOC_ENUM_SINGLE(M98095_086_CFG_LINE, 7, 2, max98095_line_mode_text);
static const struct soc_enum max98095_lineout_mode_enum =
	SOC_ENUM_SINGLE(M98095_086_CFG_LINE, 4, 2, max98095_line_mode_text);
/* Per-DAI DAC high-pass filter selection. */
static const char * const max98095_dai_fltr[] = {
	"Off", "Elliptical-HPF-16k", "Butterworth-HPF-16k",
	"Elliptical-HPF-8k", "Butterworth-HPF-8k", "Butterworth-HPF-Fs/240"};
static const struct soc_enum max98095_dai1_dac_filter_enum[] = {
	SOC_ENUM_SINGLE(M98095_02E_DAI1_FILTERS, 0, 6, max98095_dai_fltr),
};
static const struct soc_enum max98095_dai2_dac_filter_enum[] = {
	SOC_ENUM_SINGLE(M98095_038_DAI2_FILTERS, 0, 6, max98095_dai_fltr),
};
static const struct soc_enum max98095_dai3_dac_filter_enum[] = {
	SOC_ENUM_SINGLE(M98095_042_DAI3_FILTERS, 0, 6, max98095_dai_fltr),
};
/*
 * Set the MIC1 preamp gain.  The selection is cached in the driver
 * state so max98095_mic_event() can re-apply it on power-up, then
 * written to hardware (register encoding is selection + 1; 0 in the
 * register means the preamp is off).
 */
static int max98095_mic1pre_set(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
	unsigned int val = ucontrol->value.integer.value[0];

	max98095->mic1pre = val;
	snd_soc_update_bits(codec, M98095_05F_LVL_MIC1, M98095_MICPRE_MASK,
		(val + 1) << M98095_MICPRE_SHIFT);

	return 0;
}
/*
 * Report the MIC1 preamp gain from the cached selection (the register
 * holds the +1-encoded value, so the cache is the canonical source).
 */
static int max98095_mic1pre_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct max98095_priv *priv = snd_soc_codec_get_drvdata(codec);

	ucontrol->value.integer.value[0] = priv->mic1pre;

	return 0;
}
/*
 * Set the MIC2 preamp gain; mirrors max98095_mic1pre_set() for the
 * second microphone input (selection cached, register holds sel + 1).
 */
static int max98095_mic2pre_set(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
	unsigned int val = ucontrol->value.integer.value[0];

	max98095->mic2pre = val;
	snd_soc_update_bits(codec, M98095_060_LVL_MIC2, M98095_MICPRE_MASK,
		(val + 1) << M98095_MICPRE_SHIFT);

	return 0;
}
/*
 * Report the MIC2 preamp gain from the cached selection; counterpart
 * of max98095_mic1pre_get().
 */
static int max98095_mic2pre_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct max98095_priv *priv = snd_soc_codec_get_drvdata(codec);

	ucontrol->value.integer.value[0] = priv->mic2pre;

	return 0;
}
/* dB scales (in 0.01 dB units) for the volume controls below. */
static const unsigned int max98095_micboost_tlv[] = {
	TLV_DB_RANGE_HEAD(2),
	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
};
static const DECLARE_TLV_DB_SCALE(max98095_mic_tlv, 0, 100, 0);
static const DECLARE_TLV_DB_SCALE(max98095_adc_tlv, -1200, 100, 0);
static const DECLARE_TLV_DB_SCALE(max98095_adcboost_tlv, 0, 600, 0);
/* Headphone scale: step size shrinks as the level rises. */
static const unsigned int max98095_hp_tlv[] = {
	TLV_DB_RANGE_HEAD(5),
	0, 6, TLV_DB_SCALE_ITEM(-6700, 400, 0),
	7, 14, TLV_DB_SCALE_ITEM(-4000, 300, 0),
	15, 21, TLV_DB_SCALE_ITEM(-1700, 200, 0),
	22, 27, TLV_DB_SCALE_ITEM(-400, 100, 0),
	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0),
};
/* Speaker scale. */
static const unsigned int max98095_spk_tlv[] = {
	TLV_DB_RANGE_HEAD(4),
	0, 10, TLV_DB_SCALE_ITEM(-5900, 400, 0),
	11, 18, TLV_DB_SCALE_ITEM(-1700, 200, 0),
	19, 27, TLV_DB_SCALE_ITEM(-200, 100, 0),
	28, 39, TLV_DB_SCALE_ITEM(650, 50, 0),
};
/* Shared scale for the receiver and line outputs. */
static const unsigned int max98095_rcv_lout_tlv[] = {
	TLV_DB_RANGE_HEAD(5),
	0, 6, TLV_DB_SCALE_ITEM(-6200, 400, 0),
	7, 14, TLV_DB_SCALE_ITEM(-3500, 300, 0),
	15, 21, TLV_DB_SCALE_ITEM(-1200, 200, 0),
	22, 27, TLV_DB_SCALE_ITEM(100, 100, 0),
	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0),
};
/* Line input scale. */
static const unsigned int max98095_lin_tlv[] = {
	TLV_DB_RANGE_HEAD(3),
	0, 2, TLV_DB_SCALE_ITEM(-600, 300, 0),
	3, 3, TLV_DB_SCALE_ITEM(300, 1100, 0),
	4, 5, TLV_DB_SCALE_ITEM(1400, 600, 0),
};
/* ALSA mixer controls: output/input volumes and mutes, filter and
 * routing mode selectors.  Switch controls use inverted logic (bit 7
 * set in the level register mutes the path). */
static const struct snd_kcontrol_new max98095_snd_controls[] = {
	SOC_DOUBLE_R_TLV("Headphone Volume", M98095_064_LVL_HP_L,
		M98095_065_LVL_HP_R, 0, 31, 0, max98095_hp_tlv),
	SOC_DOUBLE_R_TLV("Speaker Volume", M98095_067_LVL_SPK_L,
		M98095_068_LVL_SPK_R, 0, 39, 0, max98095_spk_tlv),
	SOC_SINGLE_TLV("Receiver Volume", M98095_066_LVL_RCV,
		0, 31, 0, max98095_rcv_lout_tlv),
	SOC_DOUBLE_R_TLV("Lineout Volume", M98095_062_LVL_LINEOUT1,
		M98095_063_LVL_LINEOUT2, 0, 31, 0, max98095_rcv_lout_tlv),
	SOC_DOUBLE_R("Headphone Switch", M98095_064_LVL_HP_L,
		M98095_065_LVL_HP_R, 7, 1, 1),
	SOC_DOUBLE_R("Speaker Switch", M98095_067_LVL_SPK_L,
		M98095_068_LVL_SPK_R, 7, 1, 1),
	SOC_SINGLE("Receiver Switch", M98095_066_LVL_RCV, 7, 1, 1),
	SOC_DOUBLE_R("Lineout Switch", M98095_062_LVL_LINEOUT1,
		M98095_063_LVL_LINEOUT2, 7, 1, 1),
	SOC_SINGLE_TLV("MIC1 Volume", M98095_05F_LVL_MIC1, 0, 20, 1,
		max98095_mic_tlv),
	SOC_SINGLE_TLV("MIC2 Volume", M98095_060_LVL_MIC2, 0, 20, 1,
		max98095_mic_tlv),
	/* Boost controls go through the get/set callbacks so the
	 * selection survives DAPM power cycles. */
	SOC_SINGLE_EXT_TLV("MIC1 Boost Volume",
		M98095_05F_LVL_MIC1, 5, 2, 0,
		max98095_mic1pre_get, max98095_mic1pre_set,
		max98095_micboost_tlv),
	SOC_SINGLE_EXT_TLV("MIC2 Boost Volume",
		M98095_060_LVL_MIC2, 5, 2, 0,
		max98095_mic2pre_get, max98095_mic2pre_set,
		max98095_micboost_tlv),
	SOC_SINGLE_TLV("Linein Volume", M98095_061_LVL_LINEIN, 0, 5, 1,
		max98095_lin_tlv),
	SOC_SINGLE_TLV("ADCL Volume", M98095_05D_LVL_ADC_L, 0, 15, 1,
		max98095_adc_tlv),
	SOC_SINGLE_TLV("ADCR Volume", M98095_05E_LVL_ADC_R, 0, 15, 1,
		max98095_adc_tlv),
	SOC_SINGLE_TLV("ADCL Boost Volume", M98095_05D_LVL_ADC_L, 4, 3, 0,
		max98095_adcboost_tlv),
	SOC_SINGLE_TLV("ADCR Boost Volume", M98095_05E_LVL_ADC_R, 4, 3, 0,
		max98095_adcboost_tlv),
	SOC_SINGLE("EQ1 Switch", M98095_088_CFG_LEVEL, 0, 1, 0),
	SOC_SINGLE("EQ2 Switch", M98095_088_CFG_LEVEL, 1, 1, 0),
	SOC_SINGLE("Biquad1 Switch", M98095_088_CFG_LEVEL, 2, 1, 0),
	SOC_SINGLE("Biquad2 Switch", M98095_088_CFG_LEVEL, 3, 1, 0),
	SOC_ENUM("DAI1 Filter Mode", max98095_dai1_filter_mode_enum),
	SOC_ENUM("DAI2 Filter Mode", max98095_dai2_filter_mode_enum),
	SOC_ENUM("DAI1 DAC Filter", max98095_dai1_dac_filter_enum),
	SOC_ENUM("DAI2 DAC Filter", max98095_dai2_dac_filter_enum),
	SOC_ENUM("DAI3 DAC Filter", max98095_dai3_dac_filter_enum),
	SOC_ENUM("Linein Mode", max98095_linein_mode_enum),
	SOC_ENUM("Lineout Mode", max98095_lineout_mode_enum),
};
/*
 * DAPM mixer input switches.  Each array lists the sources that can be
 * mixed into one analog output (or ADC) and the bit in the mix
 * register that enables that source.
 */
/* Left speaker mixer switch */
static const struct snd_kcontrol_new max98095_left_speaker_mixer_controls[] = {
	SOC_DAPM_SINGLE("Left DAC1 Switch", M98095_050_MIX_SPK_LEFT, 0, 1, 0),
	SOC_DAPM_SINGLE("Right DAC1 Switch", M98095_050_MIX_SPK_LEFT, 6, 1, 0),
	/* NOTE(review): Mono DAC2 and Mono DAC3 use the same bit (3) —
	 * confirm against the datasheet. */
	SOC_DAPM_SINGLE("Mono DAC2 Switch", M98095_050_MIX_SPK_LEFT, 3, 1, 0),
	SOC_DAPM_SINGLE("Mono DAC3 Switch", M98095_050_MIX_SPK_LEFT, 3, 1, 0),
	SOC_DAPM_SINGLE("MIC1 Switch", M98095_050_MIX_SPK_LEFT, 4, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98095_050_MIX_SPK_LEFT, 5, 1, 0),
	SOC_DAPM_SINGLE("IN1 Switch", M98095_050_MIX_SPK_LEFT, 1, 1, 0),
	SOC_DAPM_SINGLE("IN2 Switch", M98095_050_MIX_SPK_LEFT, 2, 1, 0),
};
/* Right speaker mixer switch */
static const struct snd_kcontrol_new max98095_right_speaker_mixer_controls[] = {
	SOC_DAPM_SINGLE("Left DAC1 Switch", M98095_051_MIX_SPK_RIGHT, 6, 1, 0),
	SOC_DAPM_SINGLE("Right DAC1 Switch", M98095_051_MIX_SPK_RIGHT, 0, 1, 0),
	SOC_DAPM_SINGLE("Mono DAC2 Switch", M98095_051_MIX_SPK_RIGHT, 3, 1, 0),
	SOC_DAPM_SINGLE("Mono DAC3 Switch", M98095_051_MIX_SPK_RIGHT, 3, 1, 0),
	SOC_DAPM_SINGLE("MIC1 Switch", M98095_051_MIX_SPK_RIGHT, 5, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98095_051_MIX_SPK_RIGHT, 4, 1, 0),
	SOC_DAPM_SINGLE("IN1 Switch", M98095_051_MIX_SPK_RIGHT, 1, 1, 0),
	SOC_DAPM_SINGLE("IN2 Switch", M98095_051_MIX_SPK_RIGHT, 2, 1, 0),
};
/* Left headphone mixer switch */
static const struct snd_kcontrol_new max98095_left_hp_mixer_controls[] = {
	SOC_DAPM_SINGLE("Left DAC1 Switch", M98095_04C_MIX_HP_LEFT, 0, 1, 0),
	SOC_DAPM_SINGLE("Right DAC1 Switch", M98095_04C_MIX_HP_LEFT, 5, 1, 0),
	SOC_DAPM_SINGLE("MIC1 Switch", M98095_04C_MIX_HP_LEFT, 3, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98095_04C_MIX_HP_LEFT, 4, 1, 0),
	SOC_DAPM_SINGLE("IN1 Switch", M98095_04C_MIX_HP_LEFT, 1, 1, 0),
	SOC_DAPM_SINGLE("IN2 Switch", M98095_04C_MIX_HP_LEFT, 2, 1, 0),
};
/* Right headphone mixer switch */
static const struct snd_kcontrol_new max98095_right_hp_mixer_controls[] = {
	SOC_DAPM_SINGLE("Left DAC1 Switch", M98095_04D_MIX_HP_RIGHT, 5, 1, 0),
	SOC_DAPM_SINGLE("Right DAC1 Switch", M98095_04D_MIX_HP_RIGHT, 0, 1, 0),
	SOC_DAPM_SINGLE("MIC1 Switch", M98095_04D_MIX_HP_RIGHT, 3, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98095_04D_MIX_HP_RIGHT, 4, 1, 0),
	SOC_DAPM_SINGLE("IN1 Switch", M98095_04D_MIX_HP_RIGHT, 1, 1, 0),
	SOC_DAPM_SINGLE("IN2 Switch", M98095_04D_MIX_HP_RIGHT, 2, 1, 0),
};
/* Receiver earpiece mixer switch */
static const struct snd_kcontrol_new max98095_mono_rcv_mixer_controls[] = {
	SOC_DAPM_SINGLE("Left DAC1 Switch", M98095_04F_MIX_RCV, 0, 1, 0),
	SOC_DAPM_SINGLE("Right DAC1 Switch", M98095_04F_MIX_RCV, 5, 1, 0),
	SOC_DAPM_SINGLE("MIC1 Switch", M98095_04F_MIX_RCV, 3, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98095_04F_MIX_RCV, 4, 1, 0),
	SOC_DAPM_SINGLE("IN1 Switch", M98095_04F_MIX_RCV, 1, 1, 0),
	SOC_DAPM_SINGLE("IN2 Switch", M98095_04F_MIX_RCV, 2, 1, 0),
};
/* Left lineout mixer switch */
static const struct snd_kcontrol_new max98095_left_lineout_mixer_controls[] = {
	SOC_DAPM_SINGLE("Left DAC1 Switch", M98095_053_MIX_LINEOUT1, 5, 1, 0),
	SOC_DAPM_SINGLE("Right DAC1 Switch", M98095_053_MIX_LINEOUT1, 0, 1, 0),
	SOC_DAPM_SINGLE("MIC1 Switch", M98095_053_MIX_LINEOUT1, 3, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98095_053_MIX_LINEOUT1, 4, 1, 0),
	SOC_DAPM_SINGLE("IN1 Switch", M98095_053_MIX_LINEOUT1, 1, 1, 0),
	SOC_DAPM_SINGLE("IN2 Switch", M98095_053_MIX_LINEOUT1, 2, 1, 0),
};
/* Right lineout mixer switch */
static const struct snd_kcontrol_new max98095_right_lineout_mixer_controls[] = {
	SOC_DAPM_SINGLE("Left DAC1 Switch", M98095_054_MIX_LINEOUT2, 0, 1, 0),
	SOC_DAPM_SINGLE("Right DAC1 Switch", M98095_054_MIX_LINEOUT2, 5, 1, 0),
	SOC_DAPM_SINGLE("MIC1 Switch", M98095_054_MIX_LINEOUT2, 3, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98095_054_MIX_LINEOUT2, 4, 1, 0),
	SOC_DAPM_SINGLE("IN1 Switch", M98095_054_MIX_LINEOUT2, 1, 1, 0),
	SOC_DAPM_SINGLE("IN2 Switch", M98095_054_MIX_LINEOUT2, 2, 1, 0),
};
/* Left ADC mixer switch */
static const struct snd_kcontrol_new max98095_left_ADC_mixer_controls[] = {
	SOC_DAPM_SINGLE("MIC1 Switch", M98095_04A_MIX_ADC_LEFT, 7, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98095_04A_MIX_ADC_LEFT, 6, 1, 0),
	SOC_DAPM_SINGLE("IN1 Switch", M98095_04A_MIX_ADC_LEFT, 3, 1, 0),
	SOC_DAPM_SINGLE("IN2 Switch", M98095_04A_MIX_ADC_LEFT, 2, 1, 0),
};
/* Right ADC mixer switch */
static const struct snd_kcontrol_new max98095_right_ADC_mixer_controls[] = {
	SOC_DAPM_SINGLE("MIC1 Switch", M98095_04B_MIX_ADC_RIGHT, 7, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98095_04B_MIX_ADC_RIGHT, 6, 1, 0),
	SOC_DAPM_SINGLE("IN1 Switch", M98095_04B_MIX_ADC_RIGHT, 3, 1, 0),
	SOC_DAPM_SINGLE("IN2 Switch", M98095_04B_MIX_ADC_RIGHT, 2, 1, 0),
};
/*
 * DAPM event for the MIC1/MIC2 input PGAs: re-apply the user-selected
 * preamp gain after power-up and force the preamp off after power-down.
 * The widget's register identifies which microphone fired the event.
 */
static int max98095_mic_event(struct snd_soc_dapm_widget *w,
			     struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct max98095_priv *priv = snd_soc_codec_get_drvdata(codec);
	unsigned int pre;

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		/* Restore the cached selection; register encoding is
		 * selection + 1 (0 means preamp off). */
		pre = (w->reg == M98095_05F_LVL_MIC1) ?
			priv->mic1pre : priv->mic2pre;
		snd_soc_update_bits(codec, w->reg, M98095_MICPRE_MASK,
			(pre + 1) << M98095_MICPRE_SHIFT);
		break;
	case SND_SOC_DAPM_POST_PMD:
		/* Disable the preamp while the input is powered down. */
		snd_soc_update_bits(codec, w->reg, M98095_MICPRE_MASK, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
* The line inputs are stereo inputs with the left and right
* channels sharing a common PGA power control signal.
*/
/*
 * Shared handler for the line-input PGAs.  Left and right channels of
 * a line input share one power bit, so a bitmask in the driver state
 * tracks which channels are active and the PGA is powered down only
 * when the last one goes away.  @channel must be 1 or 2.
 */
static int max98095_line_pga(struct snd_soc_dapm_widget *w,
			     int event, u8 channel)
{
	struct snd_soc_codec *codec = w->codec;
	struct max98095_priv *priv = snd_soc_codec_get_drvdata(codec);
	u8 *state = &priv->lin_state;
	u8 mask = 1 << w->shift;

	BUG_ON(channel != 1 && channel != 2);

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		/* Mark the channel active and ensure the shared power
		 * bit is set. */
		*state |= channel;
		snd_soc_update_bits(codec, w->reg, mask, mask);
		break;
	case SND_SOC_DAPM_POST_PMD:
		/* Drop the channel; cut power only once neither
		 * channel is in use. */
		*state &= ~channel;
		if (!*state)
			snd_soc_update_bits(codec, w->reg, mask, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* DAPM event for the IN1 line-input PGA: delegates to the shared
 * handler as channel 1 (shares power control with IN2). */
static int max98095_pga_in1_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *k, int event)
{
	return max98095_line_pga(w, event, 1);
}
/* DAPM event for the IN2 line-input PGA: delegates to the shared
 * handler as channel 2 (shares power control with IN1). */
static int max98095_pga_in2_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *k, int event)
{
	return max98095_line_pga(w, event, 2);
}
/*
* The stereo line out mixer outputs to two stereo line outs.
* The 2nd pair has a separate set of enables.
*/
static int max98095_lineout_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
switch (event) {
case SND_SOC_DAPM_POST_PMU:
snd_soc_update_bits(codec, w->reg,
(1 << (w->shift+2)), (1 << (w->shift+2)));
break;
case SND_SOC_DAPM_POST_PMD:
snd_soc_update_bits(codec, w->reg,
(1 << (w->shift+2)), 0);
break;
default:
return -EINVAL;
}
return 0;
}
/* DAPM widget graph: ADCs, DACs, output PGAs, routing muxes, mixers,
 * input PGAs, mic biases and the external pins. */
static const struct snd_soc_dapm_widget max98095_dapm_widgets[] = {
	SND_SOC_DAPM_ADC("ADCL", "HiFi Capture", M98095_090_PWR_EN_IN, 0, 0),
	SND_SOC_DAPM_ADC("ADCR", "HiFi Capture", M98095_090_PWR_EN_IN, 1, 0),
	SND_SOC_DAPM_DAC("DACL1", "HiFi Playback",
		M98095_091_PWR_EN_OUT, 0, 0),
	SND_SOC_DAPM_DAC("DACR1", "HiFi Playback",
		M98095_091_PWR_EN_OUT, 1, 0),
	/* NOTE(review): DACM2 and DACM3 share power bit 2 — confirm
	 * this is intended for this part. */
	SND_SOC_DAPM_DAC("DACM2", "Aux Playback",
		M98095_091_PWR_EN_OUT, 2, 0),
	SND_SOC_DAPM_DAC("DACM3", "Voice Playback",
		M98095_091_PWR_EN_OUT, 2, 0),
	SND_SOC_DAPM_PGA("HP Left Out", M98095_091_PWR_EN_OUT,
		6, 0, NULL, 0),
	SND_SOC_DAPM_PGA("HP Right Out", M98095_091_PWR_EN_OUT,
		7, 0, NULL, 0),
	SND_SOC_DAPM_PGA("SPK Left Out", M98095_091_PWR_EN_OUT,
		4, 0, NULL, 0),
	SND_SOC_DAPM_PGA("SPK Right Out", M98095_091_PWR_EN_OUT,
		5, 0, NULL, 0),
	SND_SOC_DAPM_PGA("RCV Mono Out", M98095_091_PWR_EN_OUT,
		3, 0, NULL, 0),
	/* NOTE(review): only PRE_PMD is registered here, so POST_PMU is
	 * never delivered and the secondary lineout enable bit is never
	 * set by the event handler — confirm the intended event mask
	 * (likely SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD). */
	SND_SOC_DAPM_PGA_E("LINE Left Out", M98095_092_PWR_EN_OUT,
		0, 0, NULL, 0, max98095_lineout_event, SND_SOC_DAPM_PRE_PMD),
	SND_SOC_DAPM_PGA_E("LINE Right Out", M98095_092_PWR_EN_OUT,
		1, 0, NULL, 0, max98095_lineout_event, SND_SOC_DAPM_PRE_PMD),
	SND_SOC_DAPM_MUX("External MIC", SND_SOC_NOPM, 0, 0,
		&max98095_extmic_mux),
	SND_SOC_DAPM_MUX("Linein Mux", SND_SOC_NOPM, 0, 0,
		&max98095_linein_mux),
	SND_SOC_DAPM_MIXER("Left Headphone Mixer", SND_SOC_NOPM, 0, 0,
		&max98095_left_hp_mixer_controls[0],
		ARRAY_SIZE(max98095_left_hp_mixer_controls)),
	SND_SOC_DAPM_MIXER("Right Headphone Mixer", SND_SOC_NOPM, 0, 0,
		&max98095_right_hp_mixer_controls[0],
		ARRAY_SIZE(max98095_right_hp_mixer_controls)),
	SND_SOC_DAPM_MIXER("Left Speaker Mixer", SND_SOC_NOPM, 0, 0,
		&max98095_left_speaker_mixer_controls[0],
		ARRAY_SIZE(max98095_left_speaker_mixer_controls)),
	SND_SOC_DAPM_MIXER("Right Speaker Mixer", SND_SOC_NOPM, 0, 0,
		&max98095_right_speaker_mixer_controls[0],
		ARRAY_SIZE(max98095_right_speaker_mixer_controls)),
	SND_SOC_DAPM_MIXER("Receiver Mixer", SND_SOC_NOPM, 0, 0,
		&max98095_mono_rcv_mixer_controls[0],
		ARRAY_SIZE(max98095_mono_rcv_mixer_controls)),
	SND_SOC_DAPM_MIXER("Left Lineout Mixer", SND_SOC_NOPM, 0, 0,
		&max98095_left_lineout_mixer_controls[0],
		ARRAY_SIZE(max98095_left_lineout_mixer_controls)),
	SND_SOC_DAPM_MIXER("Right Lineout Mixer", SND_SOC_NOPM, 0, 0,
		&max98095_right_lineout_mixer_controls[0],
		ARRAY_SIZE(max98095_right_lineout_mixer_controls)),
	SND_SOC_DAPM_MIXER("Left ADC Mixer", SND_SOC_NOPM, 0, 0,
		&max98095_left_ADC_mixer_controls[0],
		ARRAY_SIZE(max98095_left_ADC_mixer_controls)),
	SND_SOC_DAPM_MIXER("Right ADC Mixer", SND_SOC_NOPM, 0, 0,
		&max98095_right_ADC_mixer_controls[0],
		ARRAY_SIZE(max98095_right_ADC_mixer_controls)),
	SND_SOC_DAPM_PGA_E("MIC1 Input", M98095_05F_LVL_MIC1,
		5, 0, NULL, 0, max98095_mic_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_PGA_E("MIC2 Input", M98095_060_LVL_MIC2,
		5, 0, NULL, 0, max98095_mic_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_PGA_E("IN1 Input", M98095_090_PWR_EN_IN,
		7, 0, NULL, 0, max98095_pga_in1_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_PGA_E("IN2 Input", M98095_090_PWR_EN_IN,
		7, 0, NULL, 0, max98095_pga_in2_event,
		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_MICBIAS("MICBIAS1", M98095_090_PWR_EN_IN, 2, 0),
	SND_SOC_DAPM_MICBIAS("MICBIAS2", M98095_090_PWR_EN_IN, 3, 0),
	SND_SOC_DAPM_OUTPUT("HPL"),
	SND_SOC_DAPM_OUTPUT("HPR"),
	SND_SOC_DAPM_OUTPUT("SPKL"),
	SND_SOC_DAPM_OUTPUT("SPKR"),
	SND_SOC_DAPM_OUTPUT("RCV"),
	SND_SOC_DAPM_OUTPUT("OUT1"),
	SND_SOC_DAPM_OUTPUT("OUT2"),
	SND_SOC_DAPM_OUTPUT("OUT3"),
	SND_SOC_DAPM_OUTPUT("OUT4"),
	SND_SOC_DAPM_INPUT("MIC1"),
	SND_SOC_DAPM_INPUT("MIC2"),
	SND_SOC_DAPM_INPUT("INA1"),
	SND_SOC_DAPM_INPUT("INA2"),
	SND_SOC_DAPM_INPUT("INB1"),
	SND_SOC_DAPM_INPUT("INB2"),
};
/*
 * DAPM routing table: connects DACs and analog inputs through the output
 * and ADC mixers to the physical pins.  Entries are {sink, control, source};
 * a NULL control is an unconditional (always-connected) path.
 */
static const struct snd_soc_dapm_route max98095_audio_map[] = {
/* Left headphone output mixer */
{"Left Headphone Mixer", "Left DAC1 Switch", "DACL1"},
{"Left Headphone Mixer", "Right DAC1 Switch", "DACR1"},
{"Left Headphone Mixer", "MIC1 Switch", "MIC1 Input"},
{"Left Headphone Mixer", "MIC2 Switch", "MIC2 Input"},
{"Left Headphone Mixer", "IN1 Switch", "IN1 Input"},
{"Left Headphone Mixer", "IN2 Switch", "IN2 Input"},
/* Right headphone output mixer */
{"Right Headphone Mixer", "Left DAC1 Switch", "DACL1"},
{"Right Headphone Mixer", "Right DAC1 Switch", "DACR1"},
{"Right Headphone Mixer", "MIC1 Switch", "MIC1 Input"},
{"Right Headphone Mixer", "MIC2 Switch", "MIC2 Input"},
{"Right Headphone Mixer", "IN1 Switch", "IN1 Input"},
{"Right Headphone Mixer", "IN2 Switch", "IN2 Input"},
/* Left speaker output mixer (also accepts the mono DAC2/DAC3 paths) */
{"Left Speaker Mixer", "Left DAC1 Switch", "DACL1"},
{"Left Speaker Mixer", "Right DAC1 Switch", "DACR1"},
{"Left Speaker Mixer", "Mono DAC2 Switch", "DACM2"},
{"Left Speaker Mixer", "Mono DAC3 Switch", "DACM3"},
{"Left Speaker Mixer", "MIC1 Switch", "MIC1 Input"},
{"Left Speaker Mixer", "MIC2 Switch", "MIC2 Input"},
{"Left Speaker Mixer", "IN1 Switch", "IN1 Input"},
{"Left Speaker Mixer", "IN2 Switch", "IN2 Input"},
/* Right speaker output mixer */
{"Right Speaker Mixer", "Left DAC1 Switch", "DACL1"},
{"Right Speaker Mixer", "Right DAC1 Switch", "DACR1"},
{"Right Speaker Mixer", "Mono DAC2 Switch", "DACM2"},
{"Right Speaker Mixer", "Mono DAC3 Switch", "DACM3"},
{"Right Speaker Mixer", "MIC1 Switch", "MIC1 Input"},
{"Right Speaker Mixer", "MIC2 Switch", "MIC2 Input"},
{"Right Speaker Mixer", "IN1 Switch", "IN1 Input"},
{"Right Speaker Mixer", "IN2 Switch", "IN2 Input"},
/* Earpiece/Receiver output mixer */
{"Receiver Mixer", "Left DAC1 Switch", "DACL1"},
{"Receiver Mixer", "Right DAC1 Switch", "DACR1"},
{"Receiver Mixer", "MIC1 Switch", "MIC1 Input"},
{"Receiver Mixer", "MIC2 Switch", "MIC2 Input"},
{"Receiver Mixer", "IN1 Switch", "IN1 Input"},
{"Receiver Mixer", "IN2 Switch", "IN2 Input"},
/* Left Lineout output mixer */
{"Left Lineout Mixer", "Left DAC1 Switch", "DACL1"},
{"Left Lineout Mixer", "Right DAC1 Switch", "DACR1"},
{"Left Lineout Mixer", "MIC1 Switch", "MIC1 Input"},
{"Left Lineout Mixer", "MIC2 Switch", "MIC2 Input"},
{"Left Lineout Mixer", "IN1 Switch", "IN1 Input"},
{"Left Lineout Mixer", "IN2 Switch", "IN2 Input"},
/* Right lineout output mixer */
{"Right Lineout Mixer", "Left DAC1 Switch", "DACL1"},
{"Right Lineout Mixer", "Right DAC1 Switch", "DACR1"},
{"Right Lineout Mixer", "MIC1 Switch", "MIC1 Input"},
{"Right Lineout Mixer", "MIC2 Switch", "MIC2 Input"},
{"Right Lineout Mixer", "IN1 Switch", "IN1 Input"},
{"Right Lineout Mixer", "IN2 Switch", "IN2 Input"},
/* Mixer outputs feed the output-stage PGA widgets unconditionally */
{"HP Left Out", NULL, "Left Headphone Mixer"},
{"HP Right Out", NULL, "Right Headphone Mixer"},
{"SPK Left Out", NULL, "Left Speaker Mixer"},
{"SPK Right Out", NULL, "Right Speaker Mixer"},
{"RCV Mono Out", NULL, "Receiver Mixer"},
{"LINE Left Out", NULL, "Left Lineout Mixer"},
{"LINE Right Out", NULL, "Right Lineout Mixer"},
/* Output stages drive the package pins; OUT3/OUT4 mirror OUT1/OUT2 */
{"HPL", NULL, "HP Left Out"},
{"HPR", NULL, "HP Right Out"},
{"SPKL", NULL, "SPK Left Out"},
{"SPKR", NULL, "SPK Right Out"},
{"RCV", NULL, "RCV Mono Out"},
{"OUT1", NULL, "LINE Left Out"},
{"OUT2", NULL, "LINE Right Out"},
{"OUT3", NULL, "LINE Left Out"},
{"OUT4", NULL, "LINE Right Out"},
/* Left ADC input mixer */
{"Left ADC Mixer", "MIC1 Switch", "MIC1 Input"},
{"Left ADC Mixer", "MIC2 Switch", "MIC2 Input"},
{"Left ADC Mixer", "IN1 Switch", "IN1 Input"},
{"Left ADC Mixer", "IN2 Switch", "IN2 Input"},
/* Right ADC input mixer */
{"Right ADC Mixer", "MIC1 Switch", "MIC1 Input"},
{"Right ADC Mixer", "MIC2 Switch", "MIC2 Input"},
{"Right ADC Mixer", "IN1 Switch", "IN1 Input"},
{"Right ADC Mixer", "IN2 Switch", "IN2 Input"},
/* Inputs */
{"ADCL", NULL, "Left ADC Mixer"},
{"ADCR", NULL, "Right ADC Mixer"},
{"IN1 Input", NULL, "INA1"},
{"IN2 Input", NULL, "INA2"},
{"MIC1 Input", NULL, "MIC1"},
{"MIC2 Input", NULL, "MIC2"},
};
/*
 * Register the codec's ALSA mixer controls.
 *
 * Fix: the result of snd_soc_add_codec_controls() was previously
 * discarded and 0 returned unconditionally; propagate it so a failed
 * control registration is visible to the caller.
 */
static int max98095_add_widgets(struct snd_soc_codec *codec)
{
	return snd_soc_add_codec_controls(codec, max98095_snd_controls,
					  ARRAY_SIZE(max98095_snd_controls));
}
/* codec mclk clock divider coefficients */
/*
 * Maps each supported PCM sample rate (Hz) to the SR field value written
 * into the DAIn_CLKMODE register.  Entries must stay sorted by ascending
 * rate: rate_value() relies on the ordering to pick the first entry whose
 * rate is >= the requested one.
 */
static const struct {
u32 rate;
u8 sr;
} rate_table[] = {
{8000, 0x01},
{11025, 0x02},
{16000, 0x03},
{22050, 0x04},
{24000, 0x05},
{32000, 0x06},
{44100, 0x07},
{48000, 0x08},
{88200, 0x09},
{96000, 0x0A},
};
/*
 * Look up the clock-mode SR code for a sample rate.
 *
 * Scans the ascending rate_table and stores the code of the first entry
 * whose rate is >= the requested one.  Returns 0 on a match; if the rate
 * exceeds every table entry, stores the lowest-rate code and returns
 * -EINVAL.
 */
static int rate_value(int rate, u8 *value)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(rate_table); idx++) {
		if (rate_table[idx].rate < rate)
			continue;
		*value = rate_table[idx].sr;
		return 0;
	}

	/* Rate too high for the table: fall back to the first entry */
	*value = rate_table[0].sr;
	return -EINVAL;
}
/*
 * hw_params for DAI1: program word size, sample-rate divider and - when
 * the codec is bit-clock master - the PLL NI divider derived from sysclk.
 *
 * Returns 0 on success, -EINVAL for an unsupported format/rate or when
 * master mode is requested without a configured system clock.
 *
 * Fix: the rate_value() call contained the mojibake token "®val" where
 * the address-of expression "&regval" belongs (garbled "&reg" entity);
 * restored so the function compiles and actually receives the SR code.
 */
static int max98095_dai1_hw_params(struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params,
				   struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
	struct max98095_cdata *cdata;
	unsigned long long ni;
	unsigned int rate;
	u8 regval;

	cdata = &max98095->dai[0];
	rate = params_rate(params);

	/* Word size: 16-bit clears WS, 24-bit sets it */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		snd_soc_update_bits(codec, M98095_02A_DAI1_FORMAT,
			M98095_DAI_WS, 0);
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		snd_soc_update_bits(codec, M98095_02A_DAI1_FORMAT,
			M98095_DAI_WS, M98095_DAI_WS);
		break;
	default:
		return -EINVAL;
	}

	if (rate_value(rate, &regval))
		return -EINVAL;

	snd_soc_update_bits(codec, M98095_027_DAI1_CLKMODE,
		M98095_CLKMODE_MASK, regval);
	cdata->rate = rate;

	/* Configure NI when operating as master */
	if (snd_soc_read(codec, M98095_02A_DAI1_FORMAT) & M98095_DAI_MAS) {
		if (max98095->sysclk == 0) {
			dev_err(codec->dev, "Invalid system clock frequency\n");
			return -EINVAL;
		}
		/* NI = 65536 * (96 or 48 depending on rate band) * fs / sysclk */
		ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
			* (unsigned long long int)rate;
		do_div(ni, (unsigned long long int)max98095->sysclk);
		snd_soc_write(codec, M98095_028_DAI1_CLKCFG_HI,
			(ni >> 8) & 0x7F);
		snd_soc_write(codec, M98095_029_DAI1_CLKCFG_LO,
			ni & 0xFF);
	}

	/* Update sample rate mode: DHF selects the high-rate filter path */
	if (rate < 50000)
		snd_soc_update_bits(codec, M98095_02E_DAI1_FILTERS,
			M98095_DAI_DHF, 0);
	else
		snd_soc_update_bits(codec, M98095_02E_DAI1_FILTERS,
			M98095_DAI_DHF, M98095_DAI_DHF);

	return 0;
}
/*
 * hw_params for DAI2 (mono aux path): same sequence as DAI1 but against
 * the DAI2 register bank (0x031/0x032/0x033/0x034/0x038).
 *
 * Fix: restored "&regval" where the source had the mojibake token
 * "®val" (garbled "&reg" entity), which does not compile.
 */
static int max98095_dai2_hw_params(struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params,
				   struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
	struct max98095_cdata *cdata;
	unsigned long long ni;
	unsigned int rate;
	u8 regval;

	cdata = &max98095->dai[1];
	rate = params_rate(params);

	/* Word size: 16-bit clears WS, 24-bit sets it */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		snd_soc_update_bits(codec, M98095_034_DAI2_FORMAT,
			M98095_DAI_WS, 0);
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		snd_soc_update_bits(codec, M98095_034_DAI2_FORMAT,
			M98095_DAI_WS, M98095_DAI_WS);
		break;
	default:
		return -EINVAL;
	}

	if (rate_value(rate, &regval))
		return -EINVAL;

	snd_soc_update_bits(codec, M98095_031_DAI2_CLKMODE,
		M98095_CLKMODE_MASK, regval);
	cdata->rate = rate;

	/* Configure NI when operating as master */
	if (snd_soc_read(codec, M98095_034_DAI2_FORMAT) & M98095_DAI_MAS) {
		if (max98095->sysclk == 0) {
			dev_err(codec->dev, "Invalid system clock frequency\n");
			return -EINVAL;
		}
		/* NI = 65536 * (96 or 48 depending on rate band) * fs / sysclk */
		ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
			* (unsigned long long int)rate;
		do_div(ni, (unsigned long long int)max98095->sysclk);
		snd_soc_write(codec, M98095_032_DAI2_CLKCFG_HI,
			(ni >> 8) & 0x7F);
		snd_soc_write(codec, M98095_033_DAI2_CLKCFG_LO,
			ni & 0xFF);
	}

	/* Update sample rate mode: DHF selects the high-rate filter path */
	if (rate < 50000)
		snd_soc_update_bits(codec, M98095_038_DAI2_FILTERS,
			M98095_DAI_DHF, 0);
	else
		snd_soc_update_bits(codec, M98095_038_DAI2_FILTERS,
			M98095_DAI_DHF, M98095_DAI_DHF);

	return 0;
}
/*
 * hw_params for DAI3 (mono voice path): same sequence as DAI1 but against
 * the DAI3 register bank (0x03B/0x03C/0x03D/0x03E/0x042).
 *
 * Fix: restored "&regval" where the source had the mojibake token
 * "®val" (garbled "&reg" entity), which does not compile.
 */
static int max98095_dai3_hw_params(struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params,
				   struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
	struct max98095_cdata *cdata;
	unsigned long long ni;
	unsigned int rate;
	u8 regval;

	cdata = &max98095->dai[2];
	rate = params_rate(params);

	/* Word size: 16-bit clears WS, 24-bit sets it */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		snd_soc_update_bits(codec, M98095_03E_DAI3_FORMAT,
			M98095_DAI_WS, 0);
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		snd_soc_update_bits(codec, M98095_03E_DAI3_FORMAT,
			M98095_DAI_WS, M98095_DAI_WS);
		break;
	default:
		return -EINVAL;
	}

	if (rate_value(rate, &regval))
		return -EINVAL;

	snd_soc_update_bits(codec, M98095_03B_DAI3_CLKMODE,
		M98095_CLKMODE_MASK, regval);
	cdata->rate = rate;

	/* Configure NI when operating as master */
	if (snd_soc_read(codec, M98095_03E_DAI3_FORMAT) & M98095_DAI_MAS) {
		if (max98095->sysclk == 0) {
			dev_err(codec->dev, "Invalid system clock frequency\n");
			return -EINVAL;
		}
		/* NI = 65536 * (96 or 48 depending on rate band) * fs / sysclk */
		ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL)
			* (unsigned long long int)rate;
		do_div(ni, (unsigned long long int)max98095->sysclk);
		snd_soc_write(codec, M98095_03C_DAI3_CLKCFG_HI,
			(ni >> 8) & 0x7F);
		snd_soc_write(codec, M98095_03D_DAI3_CLKCFG_LO,
			ni & 0xFF);
	}

	/* Update sample rate mode: DHF selects the high-rate filter path */
	if (rate < 50000)
		snd_soc_update_bits(codec, M98095_042_DAI3_FILTERS,
			M98095_DAI_DHF, 0);
	else
		snd_soc_update_bits(codec, M98095_042_DAI3_FILTERS,
			M98095_DAI_DHF, M98095_DAI_DHF);

	return 0;
}
static int max98095_dai_set_sysclk(struct snd_soc_dai *dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = dai->codec;
struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
/* Requested clock frequency is already setup */
if (freq == max98095->sysclk)
return 0;
/* Setup clocks for slave mode, and using the PLL
* PSCLK = 0x01 (when master clk is 10MHz to 20MHz)
* 0x02 (when master clk is 20MHz to 40MHz)..
* 0x03 (when master clk is 40MHz to 60MHz)..
*/
if ((freq >= 10000000) && (freq < 20000000)) {
snd_soc_write(codec, M98095_026_SYS_CLK, 0x10);
} else if ((freq >= 20000000) && (freq < 40000000)) {
snd_soc_write(codec, M98095_026_SYS_CLK, 0x20);
} else if ((freq >= 40000000) && (freq < 60000000)) {
snd_soc_write(codec, M98095_026_SYS_CLK, 0x30);
} else {
dev_err(codec->dev, "Invalid master clock frequency\n");
return -EINVAL;
}
dev_dbg(dai->dev, "Clock source is %d at %uHz\n", clk_id, freq);
max98095->sysclk = freq;
return 0;
}
/*
 * set_fmt for DAI1: apply master/slave, I2S/left-justified and clock
 * inversion settings to the DAI1 FORMAT register.  A no-op when the
 * requested format matches the cached one.
 */
static int max98095_dai1_set_fmt(struct snd_soc_dai *codec_dai,
				 unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
	struct max98095_cdata *cdata = &max98095->dai[0];
	u8 format = 0;

	if (fmt == cdata->fmt)
		return 0;

	cdata->fmt = fmt;

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		/* Slave mode: route the PLL from the bit clock */
		snd_soc_write(codec, M98095_028_DAI1_CLKCFG_HI, 0x80);
		snd_soc_write(codec, M98095_029_DAI1_CLKCFG_LO, 0x00);
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		/* Codec generates the clocks */
		format |= M98095_DAI_MAS;
		break;
	case SND_SOC_DAIFMT_CBS_CFM:
	case SND_SOC_DAIFMT_CBM_CFS:
	default:
		dev_err(codec->dev, "Clock mode unsupported");
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		/* I2S delays data one BCLK after LRCLK */
		format |= M98095_DAI_DLY;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_NB_IF:
		format |= M98095_DAI_WCI;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		format |= M98095_DAI_BCI;
		break;
	case SND_SOC_DAIFMT_IB_IF:
		format |= M98095_DAI_BCI | M98095_DAI_WCI;
		break;
	default:
		return -EINVAL;
	}

	snd_soc_update_bits(codec, M98095_02A_DAI1_FORMAT,
		M98095_DAI_MAS | M98095_DAI_DLY | M98095_DAI_BCI |
		M98095_DAI_WCI, format);

	snd_soc_write(codec, M98095_02B_DAI1_CLOCK, M98095_DAI_BSEL64);

	return 0;
}
/*
 * set_fmt for DAI2: same settings as DAI1 but against the DAI2 register
 * bank (0x032/0x033/0x034/0x035).  A no-op when the requested format
 * matches the cached one.
 */
static int max98095_dai2_set_fmt(struct snd_soc_dai *codec_dai,
				 unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
	struct max98095_cdata *cdata = &max98095->dai[1];
	u8 format = 0;

	if (fmt == cdata->fmt)
		return 0;

	cdata->fmt = fmt;

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		/* Slave mode: route the PLL from the bit clock */
		snd_soc_write(codec, M98095_032_DAI2_CLKCFG_HI, 0x80);
		snd_soc_write(codec, M98095_033_DAI2_CLKCFG_LO, 0x00);
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		/* Codec generates the clocks */
		format |= M98095_DAI_MAS;
		break;
	case SND_SOC_DAIFMT_CBS_CFM:
	case SND_SOC_DAIFMT_CBM_CFS:
	default:
		dev_err(codec->dev, "Clock mode unsupported");
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		/* I2S delays data one BCLK after LRCLK */
		format |= M98095_DAI_DLY;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_NB_IF:
		format |= M98095_DAI_WCI;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		format |= M98095_DAI_BCI;
		break;
	case SND_SOC_DAIFMT_IB_IF:
		format |= M98095_DAI_BCI | M98095_DAI_WCI;
		break;
	default:
		return -EINVAL;
	}

	snd_soc_update_bits(codec, M98095_034_DAI2_FORMAT,
		M98095_DAI_MAS | M98095_DAI_DLY | M98095_DAI_BCI |
		M98095_DAI_WCI, format);

	snd_soc_write(codec, M98095_035_DAI2_CLOCK, M98095_DAI_BSEL64);

	return 0;
}
/*
 * set_fmt for DAI3: same settings as DAI1 but against the DAI3 register
 * bank (0x03C/0x03D/0x03E/0x03F).  A no-op when the requested format
 * matches the cached one.
 */
static int max98095_dai3_set_fmt(struct snd_soc_dai *codec_dai,
				 unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
	struct max98095_cdata *cdata = &max98095->dai[2];
	u8 format = 0;

	if (fmt == cdata->fmt)
		return 0;

	cdata->fmt = fmt;

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		/* Slave mode: route the PLL from the bit clock */
		snd_soc_write(codec, M98095_03C_DAI3_CLKCFG_HI, 0x80);
		snd_soc_write(codec, M98095_03D_DAI3_CLKCFG_LO, 0x00);
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		/* Codec generates the clocks */
		format |= M98095_DAI_MAS;
		break;
	case SND_SOC_DAIFMT_CBS_CFM:
	case SND_SOC_DAIFMT_CBM_CFS:
	default:
		dev_err(codec->dev, "Clock mode unsupported");
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		/* I2S delays data one BCLK after LRCLK */
		format |= M98095_DAI_DLY;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_NB_IF:
		format |= M98095_DAI_WCI;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		format |= M98095_DAI_BCI;
		break;
	case SND_SOC_DAIFMT_IB_IF:
		format |= M98095_DAI_BCI | M98095_DAI_WCI;
		break;
	default:
		return -EINVAL;
	}

	snd_soc_update_bits(codec, M98095_03E_DAI3_FORMAT,
		M98095_DAI_MAS | M98095_DAI_DLY | M98095_DAI_BCI |
		M98095_DAI_WCI, format);

	snd_soc_write(codec, M98095_03F_DAI3_CLOCK, M98095_DAI_BSEL64);

	return 0;
}
/*
 * DAPM bias-level callback.  STANDBY re-syncs the register cache when
 * leaving OFF and enables mic bias; OFF disables mic bias and marks the
 * cache dirty.  ON/PREPARE need no register work on this part.
 */
static int max98095_set_bias_level(struct snd_soc_codec *codec,
				   enum snd_soc_bias_level level)
{
	int ret;

	switch (level) {
	case SND_SOC_BIAS_ON:
	case SND_SOC_BIAS_PREPARE:
		/* No register changes needed */
		break;

	case SND_SOC_BIAS_STANDBY:
		/* Coming out of OFF the hardware lost its state; replay
		 * the register cache before touching anything else. */
		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
			ret = snd_soc_cache_sync(codec);
			if (ret != 0) {
				dev_err(codec->dev,
					"Failed to sync cache: %d\n", ret);
				return ret;
			}
		}
		snd_soc_update_bits(codec, M98095_090_PWR_EN_IN,
				M98095_MBEN, M98095_MBEN);
		break;

	case SND_SOC_BIAS_OFF:
		snd_soc_update_bits(codec, M98095_090_PWR_EN_IN,
				M98095_MBEN, 0);
		/* Registers will need re-syncing on the next power up */
		codec->cache_sync = 1;
		break;
	}

	codec->dapm.bias_level = level;
	return 0;
}
/* Stream capabilities shared by all three DAIs */
#define MAX98095_RATES SNDRV_PCM_RATE_8000_96000
#define MAX98095_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
/* Per-DAI callback tables; sysclk handling is common to all three */
static const struct snd_soc_dai_ops max98095_dai1_ops = {
.set_sysclk = max98095_dai_set_sysclk,
.set_fmt = max98095_dai1_set_fmt,
.hw_params = max98095_dai1_hw_params,
};
static const struct snd_soc_dai_ops max98095_dai2_ops = {
.set_sysclk = max98095_dai_set_sysclk,
.set_fmt = max98095_dai2_set_fmt,
.hw_params = max98095_dai2_hw_params,
};
static const struct snd_soc_dai_ops max98095_dai3_ops = {
.set_sysclk = max98095_dai_set_sysclk,
.set_fmt = max98095_dai3_set_fmt,
.hw_params = max98095_dai3_hw_params,
};
/*
 * DAI descriptors: DAI1 ("HiFi") is stereo playback+capture; DAI2 ("Aux")
 * and DAI3 ("Voice") are mono playback-only paths.
 */
static struct snd_soc_dai_driver max98095_dai[] = {
{
.name = "HiFi",
.playback = {
.stream_name = "HiFi Playback",
.channels_min = 1,
.channels_max = 2,
.rates = MAX98095_RATES,
.formats = MAX98095_FORMATS,
},
.capture = {
.stream_name = "HiFi Capture",
.channels_min = 1,
.channels_max = 2,
.rates = MAX98095_RATES,
.formats = MAX98095_FORMATS,
},
.ops = &max98095_dai1_ops,
},
{
.name = "Aux",
.playback = {
.stream_name = "Aux Playback",
.channels_min = 1,
.channels_max = 1,
.rates = MAX98095_RATES,
.formats = MAX98095_FORMATS,
},
.ops = &max98095_dai2_ops,
},
{
.name = "Voice",
.playback = {
.stream_name = "Voice Playback",
.channels_min = 1,
.channels_max = 1,
.rates = MAX98095_RATES,
.formats = MAX98095_FORMATS,
},
.ops = &max98095_dai3_ops,
}
};
/*
 * Map an EQ control name to its channel index: "EQ1 Mode" -> 0,
 * "EQ2 Mode" -> 1, anything else -> -EINVAL.
 */
static int max98095_get_eq_channel(const char *name)
{
	if (!strcmp(name, "EQ1 Mode"))
		return 0;
	if (!strcmp(name, "EQ2 Mode"))
		return 1;
	return -EINVAL;
}
/*
 * Select an EQ preset for one channel and program its five band
 * coefficients, choosing the preset variant whose sample rate is nearest
 * to the stream's current rate.  The filter is disabled around the
 * coefficient load and its previous enable state restored afterwards.
 *
 * Fix: max98095_get_eq_channel() can return -EINVAL, which the old
 * BUG_ON(channel > 1) did not catch; the negative value was then used
 * as an array index into max98095->dai[] (array underflow).  Reject
 * negative channels up front instead.
 */
static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
	struct max98095_pdata *pdata = max98095->pdata;
	int channel = max98095_get_eq_channel(kcontrol->id.name);
	struct max98095_cdata *cdata;
	int sel = ucontrol->value.integer.value[0];
	struct max98095_eq_cfg *coef_set;
	int fs, best, best_val, i;
	int regmask, regsave;

	if (channel < 0)
		return channel;

	if (!pdata || !max98095->eq_textcnt)
		return 0;

	if (sel >= pdata->eq_cfgcnt)
		return -EINVAL;

	cdata = &max98095->dai[channel];
	cdata->eq_sel = sel;
	fs = cdata->rate;

	/* Find the selected configuration with nearest sample rate */
	best = 0;
	best_val = INT_MAX;
	for (i = 0; i < pdata->eq_cfgcnt; i++) {
		if (strcmp(pdata->eq_cfg[i].name, max98095->eq_texts[sel]) == 0 &&
			abs(pdata->eq_cfg[i].rate - fs) < best_val) {
			best = i;
			best_val = abs(pdata->eq_cfg[i].rate - fs);
		}
	}

	dev_dbg(codec->dev, "Selected %s/%dHz for %dHz sample rate\n",
		pdata->eq_cfg[best].name,
		pdata->eq_cfg[best].rate, fs);

	coef_set = &pdata->eq_cfg[best];
	regmask = (channel == 0) ? M98095_EQ1EN : M98095_EQ2EN;

	/* Disable filter while configuring, and save current on/off state */
	regsave = snd_soc_read(codec, M98095_088_CFG_LEVEL);
	snd_soc_update_bits(codec, M98095_088_CFG_LEVEL, regmask, 0);

	/* SEG mode exposes the coefficient memory; serialize access */
	mutex_lock(&codec->mutex);
	snd_soc_update_bits(codec, M98095_00F_HOST_CFG, M98095_SEG, M98095_SEG);
	m98095_eq_band(codec, channel, 0, coef_set->band1);
	m98095_eq_band(codec, channel, 1, coef_set->band2);
	m98095_eq_band(codec, channel, 2, coef_set->band3);
	m98095_eq_band(codec, channel, 3, coef_set->band4);
	m98095_eq_band(codec, channel, 4, coef_set->band5);
	snd_soc_update_bits(codec, M98095_00F_HOST_CFG, M98095_SEG, 0);
	mutex_unlock(&codec->mutex);

	/* Restore the original on/off state */
	snd_soc_update_bits(codec, M98095_088_CFG_LEVEL, regmask, regsave);
	return 0;
}
/*
 * Report the currently selected EQ preset for a channel.
 *
 * Fix: max98095_get_eq_channel() can return -EINVAL; the value was used
 * unchecked as an index into max98095->dai[] (array underflow).  Reject
 * negative channels before dereferencing.
 */
static int max98095_get_eq_enum(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
	int channel = max98095_get_eq_channel(kcontrol->id.name);
	struct max98095_cdata *cdata;

	if (channel < 0)
		return channel;

	cdata = &max98095->dai[channel];
	ucontrol->value.enumerated.item[0] = cdata->eq_sel;
	return 0;
}
/*
 * Build the "EQ1 Mode"/"EQ2 Mode" enum controls from platform-data EQ
 * presets.  Collects the distinct preset names into max98095->eq_texts
 * (grown one entry at a time with krealloc) and points the shared
 * soc_enum at that array before registering the controls.
 */
static void max98095_handle_eq_pdata(struct snd_soc_codec *codec)
{
struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
struct max98095_pdata *pdata = max98095->pdata;
struct max98095_eq_cfg *cfg;
unsigned int cfgcnt;
int i, j;
const char **t;
int ret;
/* Both controls share max98095->eq_enum; the channel is recovered
 * from the control name at get/put time. */
struct snd_kcontrol_new controls[] = {
SOC_ENUM_EXT("EQ1 Mode",
max98095->eq_enum,
max98095_get_eq_enum,
max98095_put_eq_enum),
SOC_ENUM_EXT("EQ2 Mode",
max98095->eq_enum,
max98095_get_eq_enum,
max98095_put_eq_enum),
};
cfg = pdata->eq_cfg;
cfgcnt = pdata->eq_cfgcnt;
/* Setup an array of texts for the equalizer enum.
 * This is based on Mark Brown's equalizer driver code.
 */
max98095->eq_textcnt = 0;
max98095->eq_texts = NULL;
for (i = 0; i < cfgcnt; i++) {
/* Skip names already collected (presets may repeat per rate) */
for (j = 0; j < max98095->eq_textcnt; j++) {
if (strcmp(cfg[i].name, max98095->eq_texts[j]) == 0)
break;
}
if (j != max98095->eq_textcnt)
continue;
/* Expand the array */
t = krealloc(max98095->eq_texts,
sizeof(char *) * (max98095->eq_textcnt + 1),
GFP_KERNEL);
/* On allocation failure the old array is still valid; this
 * preset name is simply dropped from the enum. */
if (t == NULL)
continue;
/* Store the new entry */
t[max98095->eq_textcnt] = cfg[i].name;
max98095->eq_textcnt++;
max98095->eq_texts = t;
}
/* Now point the soc_enum to .texts array items */
max98095->eq_enum.texts = max98095->eq_texts;
max98095->eq_enum.max = max98095->eq_textcnt;
ret = snd_soc_add_codec_controls(codec, controls, ARRAY_SIZE(controls));
if (ret != 0)
dev_err(codec->dev, "Failed to add EQ control: %d\n", ret);
}
/* Control names for the two biquad channels; index == channel number */
static const char *bq_mode_name[] = {"Biquad1 Mode", "Biquad2 Mode"};

/*
 * Map a biquad control name to its channel index by scanning
 * bq_mode_name[].  Returns the index, or -EINVAL (after logging) for an
 * unknown name - which indicates a driver bug, not user input.
 */
static int max98095_get_bq_channel(struct snd_soc_codec *codec,
				   const char *name)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(bq_mode_name); idx++)
		if (!strcmp(bq_mode_name[idx], name))
			return idx;

	/* Shouldn't happen */
	dev_err(codec->dev, "Bad biquad channel name '%s'\n", name);
	return -EINVAL;
}
/*
 * Select a biquad preset for one channel and program its two band
 * coefficients, choosing the preset variant whose sample rate is nearest
 * to the stream's current rate.  The filter is disabled around the
 * coefficient load and its previous enable state restored afterwards.
 */
static int max98095_put_bq_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
struct max98095_pdata *pdata = max98095->pdata;
int channel = max98095_get_bq_channel(codec, kcontrol->id.name);
struct max98095_cdata *cdata;
int sel = ucontrol->value.integer.value[0];
struct max98095_biquad_cfg *coef_set;
int fs, best, best_val, i;
int regmask, regsave;
/* Propagate -EINVAL from an unrecognized control name */
if (channel < 0)
return channel;
if (!pdata || !max98095->bq_textcnt)
return 0;
if (sel >= pdata->bq_cfgcnt)
return -EINVAL;
cdata = &max98095->dai[channel];
cdata->bq_sel = sel;
fs = cdata->rate;
/* Find the selected configuration with nearest sample rate */
best = 0;
best_val = INT_MAX;
for (i = 0; i < pdata->bq_cfgcnt; i++) {
if (strcmp(pdata->bq_cfg[i].name, max98095->bq_texts[sel]) == 0 &&
abs(pdata->bq_cfg[i].rate - fs) < best_val) {
best = i;
best_val = abs(pdata->bq_cfg[i].rate - fs);
}
}
dev_dbg(codec->dev, "Selected %s/%dHz for %dHz sample rate\n",
pdata->bq_cfg[best].name,
pdata->bq_cfg[best].rate, fs);
coef_set = &pdata->bq_cfg[best];
regmask = (channel == 0) ? M98095_BQ1EN : M98095_BQ2EN;
/* Disable filter while configuring, and save current on/off state */
regsave = snd_soc_read(codec, M98095_088_CFG_LEVEL);
snd_soc_update_bits(codec, M98095_088_CFG_LEVEL, regmask, 0);
/* SEG mode exposes the coefficient memory; serialize access */
mutex_lock(&codec->mutex);
snd_soc_update_bits(codec, M98095_00F_HOST_CFG, M98095_SEG, M98095_SEG);
m98095_biquad_band(codec, channel, 0, coef_set->band1);
m98095_biquad_band(codec, channel, 1, coef_set->band2);
snd_soc_update_bits(codec, M98095_00F_HOST_CFG, M98095_SEG, 0);
mutex_unlock(&codec->mutex);
/* Restore the original on/off state */
snd_soc_update_bits(codec, M98095_088_CFG_LEVEL, regmask, regsave);
return 0;
}
/*
 * Report the currently selected biquad preset for a channel; propagates
 * -EINVAL from an unrecognized control name.
 */
static int max98095_get_bq_enum(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
	int channel = max98095_get_bq_channel(codec, kcontrol->id.name);

	if (channel < 0)
		return channel;

	ucontrol->value.enumerated.item[0] = max98095->dai[channel].bq_sel;
	return 0;
}
/*
 * Build the "Biquad1 Mode"/"Biquad2 Mode" enum controls from
 * platform-data biquad presets.  Mirrors max98095_handle_eq_pdata():
 * distinct preset names are collected into max98095->bq_texts and the
 * shared soc_enum points at that array.
 */
static void max98095_handle_bq_pdata(struct snd_soc_codec *codec)
{
struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
struct max98095_pdata *pdata = max98095->pdata;
struct max98095_biquad_cfg *cfg;
unsigned int cfgcnt;
int i, j;
const char **t;
int ret;
/* Control names come from bq_mode_name[] so get_bq_channel() can
 * map them back to channel indices. */
struct snd_kcontrol_new controls[] = {
SOC_ENUM_EXT((char *)bq_mode_name[0],
max98095->bq_enum,
max98095_get_bq_enum,
max98095_put_bq_enum),
SOC_ENUM_EXT((char *)bq_mode_name[1],
max98095->bq_enum,
max98095_get_bq_enum,
max98095_put_bq_enum),
};
BUILD_BUG_ON(ARRAY_SIZE(controls) != ARRAY_SIZE(bq_mode_name));
cfg = pdata->bq_cfg;
cfgcnt = pdata->bq_cfgcnt;
/* Setup an array of texts for the biquad enum.
 * This is based on Mark Brown's equalizer driver code.
 */
max98095->bq_textcnt = 0;
max98095->bq_texts = NULL;
for (i = 0; i < cfgcnt; i++) {
/* Skip names already collected (presets may repeat per rate) */
for (j = 0; j < max98095->bq_textcnt; j++) {
if (strcmp(cfg[i].name, max98095->bq_texts[j]) == 0)
break;
}
if (j != max98095->bq_textcnt)
continue;
/* Expand the array */
t = krealloc(max98095->bq_texts,
sizeof(char *) * (max98095->bq_textcnt + 1),
GFP_KERNEL);
/* On allocation failure the old array is still valid; this
 * preset name is simply dropped from the enum. */
if (t == NULL)
continue;
/* Store the new entry */
t[max98095->bq_textcnt] = cfg[i].name;
max98095->bq_textcnt++;
max98095->bq_texts = t;
}
/* Now point the soc_enum to .texts array items */
max98095->bq_enum.texts = max98095->bq_texts;
max98095->bq_enum.max = max98095->bq_textcnt;
ret = snd_soc_add_codec_controls(codec, controls, ARRAY_SIZE(controls));
if (ret != 0)
dev_err(codec->dev, "Failed to add Biquad control: %d\n", ret);
}
static void max98095_handle_pdata(struct snd_soc_codec *codec)
{
struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
struct max98095_pdata *pdata = max98095->pdata;
u8 regval = 0;
if (!pdata) {
dev_dbg(codec->dev, "No platform data\n");
return;
}
/* Configure mic for analog/digital mic mode */
if (pdata->digmic_left_mode)
regval |= M98095_DIGMIC_L;
if (pdata->digmic_right_mode)
regval |= M98095_DIGMIC_R;
snd_soc_write(codec, M98095_087_CFG_MIC, regval);
/* Configure equalizers */
if (pdata->eq_cfgcnt)
max98095_handle_eq_pdata(codec);
/* Configure bi-quad filters */
if (pdata->bq_cfgcnt)
max98095_handle_bq_pdata(codec);
}
#ifdef CONFIG_PM
/* Power down fully on suspend; the cache is marked dirty by the
 * BIAS_OFF transition and replayed on resume. */
static int max98095_suspend(struct snd_soc_codec *codec)
{
max98095_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
/* Return to standby on resume (triggers the cache sync in
 * max98095_set_bias_level). */
static int max98095_resume(struct snd_soc_codec *codec)
{
max98095_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
}
#else
#define max98095_suspend NULL
#define max98095_resume NULL
#endif
/*
 * Bring the part to a known state.  The chip has no soft-reset register,
 * so after quiescing the DSP and powering the codec down, every cached
 * register from HOST_INT_CFG onward is rewritten with its hardware
 * default.  Returns 0 on success or the first failing write's error.
 */
static int max98095_reset(struct snd_soc_codec *codec)
{
int i, ret;
/* Gracefully reset the DSP core and the codec hardware
 * in a proper sequence */
ret = snd_soc_write(codec, M98095_00F_HOST_CFG, 0);
if (ret < 0) {
dev_err(codec->dev, "Failed to reset DSP: %d\n", ret);
return ret;
}
ret = snd_soc_write(codec, M98095_097_PWR_SYS, 0);
if (ret < 0) {
dev_err(codec->dev, "Failed to reset codec: %d\n", ret);
return ret;
}
/* Reset to hardware default for registers, as there is not
 * a soft reset hardware control register */
for (i = M98095_010_HOST_INT_CFG; i < M98095_REG_MAX_CACHED; i++) {
ret = snd_soc_write(codec, i, max98095_reg_def[i]);
if (ret < 0) {
dev_err(codec->dev, "Failed to reset: %d\n", ret);
return ret;
}
}
return ret;
}
/*
 * Codec probe: set up 8-bit register I/O, reset the part, initialize the
 * driver's cached per-DAI state, verify the chip responds by reading the
 * revision ID, then program the default mixer/IO routing and register
 * the mixer controls.  Returns 0 on success or a negative errno.
 */
static int max98095_probe(struct snd_soc_codec *codec)
{
struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
struct max98095_cdata *cdata;
int ret = 0;
/* 8-bit register addresses, 8-bit values, over I2C */
ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
if (ret != 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
return ret;
}
/* reset the codec, the DSP core, and disable all interrupts */
max98095_reset(codec);
/* initialize private data */
/* -1 sentinels force the first hw_params/set_fmt to program hardware */
max98095->sysclk = (unsigned)-1;
max98095->eq_textcnt = 0;
max98095->bq_textcnt = 0;
cdata = &max98095->dai[0];
cdata->rate = (unsigned)-1;
cdata->fmt = (unsigned)-1;
cdata->eq_sel = 0;
cdata->bq_sel = 0;
cdata = &max98095->dai[1];
cdata->rate = (unsigned)-1;
cdata->fmt = (unsigned)-1;
cdata->eq_sel = 0;
cdata->bq_sel = 0;
cdata = &max98095->dai[2];
cdata->rate = (unsigned)-1;
cdata->fmt = (unsigned)-1;
cdata->eq_sel = 0;
cdata->bq_sel = 0;
max98095->lin_state = 0;
max98095->mic1pre = 0;
max98095->mic2pre = 0;
/* Sanity check: a failed read here means the chip is not responding */
ret = snd_soc_read(codec, M98095_0FF_REV_ID);
if (ret < 0) {
dev_err(codec->dev, "Failure reading hardware revision: %d\n",
ret);
goto err_access;
}
/* Revision register encodes 'A' as 0x40, 'B' as 0x41, ... */
dev_info(codec->dev, "Hardware revision: %c\n", ret - 0x40 + 'A');
snd_soc_write(codec, M98095_097_PWR_SYS, M98095_PWRSV);
/* initialize registers cache to hardware default */
max98095_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Default routing: DAI1 stereo to the L/R DACs, DAI2/DAI3 to mono */
snd_soc_write(codec, M98095_048_MIX_DAC_LR,
M98095_DAI1L_TO_DACL|M98095_DAI1R_TO_DACR);
snd_soc_write(codec, M98095_049_MIX_DAC_M,
M98095_DAI2M_TO_DACM|M98095_DAI3M_TO_DACM);
snd_soc_write(codec, M98095_092_PWR_EN_OUT, M98095_SPK_SPREADSPECTRUM);
snd_soc_write(codec, M98095_045_CFG_DSP, M98095_DSPNORMAL);
snd_soc_write(codec, M98095_04E_CFG_HP, M98095_HPNORMAL);
snd_soc_write(codec, M98095_02C_DAI1_IOCFG,
M98095_S1NORMAL|M98095_SDATA);
snd_soc_write(codec, M98095_036_DAI2_IOCFG,
M98095_S2NORMAL|M98095_SDATA);
snd_soc_write(codec, M98095_040_DAI3_IOCFG,
M98095_S3NORMAL|M98095_SDATA);
max98095_handle_pdata(codec);
/* take the codec out of the shut down */
snd_soc_update_bits(codec, M98095_097_PWR_SYS, M98095_SHDNRUN,
M98095_SHDNRUN);
max98095_add_widgets(codec);
err_access:
return ret;
}
/* Codec removal: power the part fully down. */
static int max98095_remove(struct snd_soc_codec *codec)
{
max98095_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
/*
 * ASoC codec driver descriptor: lifecycle callbacks, register-cache
 * layout (one byte per register, defaults from max98095_reg_def), and
 * the DAPM widget/route tables defined above.
 */
static struct snd_soc_codec_driver soc_codec_dev_max98095 = {
.probe = max98095_probe,
.remove = max98095_remove,
.suspend = max98095_suspend,
.resume = max98095_resume,
.set_bias_level = max98095_set_bias_level,
.reg_cache_size = ARRAY_SIZE(max98095_reg_def),
.reg_word_size = sizeof(u8),
.reg_cache_default = max98095_reg_def,
.readable_register = max98095_readable,
.volatile_register = max98095_volatile,
.dapm_widgets = max98095_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(max98095_dapm_widgets),
.dapm_routes = max98095_audio_map,
.num_dapm_routes = ARRAY_SIZE(max98095_audio_map),
};
/*
 * I2C probe: allocate (device-managed) driver state, record the device
 * variant and platform data, and register the codec with its three DAIs.
 */
static int max98095_i2c_probe(struct i2c_client *i2c,
			      const struct i2c_device_id *id)
{
	struct max98095_priv *priv;

	priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->devtype = id->driver_data;
	i2c_set_clientdata(i2c, priv);
	priv->pdata = i2c->dev.platform_data;

	return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_max98095,
				      max98095_dai, ARRAY_SIZE(max98095_dai));
}
/* I2C removal: unregister the codec (devm frees the private data). */
static int __devexit max98095_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
}
/* Supported I2C device IDs; driver_data carries the devtype enum. */
static const struct i2c_device_id max98095_i2c_id[] = {
{ "max98095", MAX98095 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max98095_i2c_id);
static struct i2c_driver max98095_i2c_driver = {
.driver = {
.name = "max98095",
.owner = THIS_MODULE,
},
.probe = max98095_i2c_probe,
.remove = __devexit_p(max98095_i2c_remove),
.id_table = max98095_i2c_id,
};
/* Module init: register the I2C driver, logging on failure. */
static int __init max98095_init(void)
{
	int ret = i2c_add_driver(&max98095_i2c_driver);

	if (ret)
		pr_err("Failed to register max98095 I2C driver: %d\n", ret);
	return ret;
}
module_init(max98095_init);
/* Module exit: unregister the I2C driver. */
static void __exit max98095_exit(void)
{
i2c_del_driver(&max98095_i2c_driver);
}
module_exit(max98095_exit);
MODULE_DESCRIPTION("ALSA SoC MAX98095 driver");
MODULE_AUTHOR("Peter Hsiang");
MODULE_LICENSE("GPL");
| gpl-2.0 |
0x00evil/Xiaomi_Kernel_OpenSource | arch/blackfin/mach-bf527/boards/ezbrd.c | 4419 | 21072 | /*
* Copyright 2004-2009 Analog Devices Inc.
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/musb.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/reboot.h>
#include <asm/nand.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
#include <linux/spi/ad7877.h>
/*
 * Name the Board for the /proc/cpuinfo
 */
const char bfin_board_name[] = "ADI BF526-EZBRD";
/*
* Driver needs to know address, irq and flag pin.
*/
#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
/* MUSB controller: MMIO window plus its general and DMA interrupts */
static struct resource musb_resources[] = {
[0] = {
.start = 0xffc03800,
.end = 0xffc03cff,
.flags = IORESOURCE_MEM,
},
[1] = { /* general IRQ */
.start = IRQ_USB_INT0,
.end = IRQ_USB_INT0,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
.name = "mc"
},
[2] = { /* DMA IRQ */
.start = IRQ_USB_DMA,
.end = IRQ_USB_DMA,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
.name = "dma"
},
};
/* Controller capabilities: 8 endpoints, 8 DMA channels, VRSEL on PG13 */
static struct musb_hdrc_config musb_config = {
.multipoint = 0,
.dyn_fifo = 0,
.soft_con = 1,
.dma = 1,
.num_eps = 8,
.dma_channels = 8,
.gpio_vrsel = GPIO_PG13,
/* Some custom boards need to be active low, just set it to "0"
 * if it is the case.
 */
.gpio_vrsel_active = 1,
.clkin = 24, /* musb CLKIN in MHZ */
};
/* Operating mode follows the kernel configuration */
static struct musb_hdrc_platform_data musb_plat = {
#if defined(CONFIG_USB_MUSB_OTG)
.mode = MUSB_OTG,
#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
.mode = MUSB_HOST,
#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
.mode = MUSB_PERIPHERAL,
#endif
.config = &musb_config,
};
static u64 musb_dmamask = ~(u32)0;
static struct platform_device musb_device = {
.name = "musb-blackfin",
.id = 0,
.dev = {
.dma_mask = &musb_dmamask,
.coherent_dma_mask = 0xffffffff,
.platform_data = &musb_plat,
},
.num_resources = ARRAY_SIZE(musb_resources),
.resource = musb_resources,
};
#endif
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
/* Parallel NOR flash layout: 256 KiB bootloader, 1.75 MiB kernel,
* remainder for the root file system. */
static struct mtd_partition ezbrd_partitions[] = {
{
.name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
.name = "linux kernel(nor)",
.size = 0x1C0000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "file system(nor)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
/* 16-bit (width = 2 bytes) data bus to the NOR device. */
static struct physmap_flash_data ezbrd_flash_data = {
.width = 2,
.parts = ezbrd_partitions,
.nr_parts = ARRAY_SIZE(ezbrd_partitions),
};
/* NOR is mapped in async bank space at 0x20000000, 4 MiB window. */
static struct resource ezbrd_flash_resource = {
.start = 0x20000000,
.end = 0x203fffff,
.flags = IORESOURCE_MEM,
};
static struct platform_device ezbrd_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &ezbrd_flash_data,
},
.num_resources = 1,
.resource = &ezbrd_flash_resource,
};
#endif
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
/* On-chip NAND flash controller (NFC) partition layout. */
static struct mtd_partition partition_info[] = {
{
.name = "bootloader(nand)",
.offset = 0,
.size = 0x40000,
}, {
.name = "linux kernel(nand)",
.offset = MTDPART_OFS_APPEND,
.size = 4 * 1024 * 1024,
},
{
.name = "file system(nand)",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
/* 8-bit NAND bus; rd_dly/wr_dly are NFC strobe delays in controller units. */
static struct bf5xx_nand_platform bf5xx_nand_platform = {
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
.rd_dly = 3,
.wr_dly = 3,
};
/* NFC register window plus its DMA channel (exposed via the IRQ resource). */
static struct resource bf5xx_nand_resources[] = {
{
.start = NFC_CTL,
.end = NFC_DATA_RD + 2,
.flags = IORESOURCE_MEM,
},
{
.start = CH_NFC,
.end = CH_NFC,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bf5xx_nand_device = {
.name = "bf5xx-nand",
.id = 0,
.num_resources = ARRAY_SIZE(bf5xx_nand_resources),
.resource = bf5xx_nand_resources,
.dev = {
.platform_data = &bf5xx_nand_platform,
},
};
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
/* On-chip real-time clock; id -1 = single instance. */
static struct platform_device rtc_device = {
.name = "rtc-bfin",
.id = -1,
};
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#include <linux/bfin_mac.h>
/* On-chip Ethernet MAC wired up in RMII mode, single PHY at address 1. */
static const unsigned short bfin_mac_peripherals[] = P_RMII0;
static struct bfin_phydev_platform_data bfin_phydev_data[] = {
{
.addr = 1,
.irq = IRQ_MAC_PHYINT,
},
};
static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
.phydev_number = 1,
.phydev_data = bfin_phydev_data,
.phy_mode = PHY_INTERFACE_MODE_RMII,
.mac_peripherals = bfin_mac_peripherals,
};
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
.dev = {
.platform_data = &bfin_mii_bus_data,
}
};
/* NOTE(review): platform_data here points at the MII bus platform_device
* itself, not a plain data struct -- this mirrors how the bfin_mac driver
* locates its MII bus; confirm against drivers/net/ethernet/adi. */
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
.dev = {
.platform_data = &bfin_mii_bus,
}
};
#endif
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
/* Serial (SPI) boot flash: 256 KiB protected bootloader, rest for kernel. */
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
.name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
.name = "linux kernel(spi)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
static struct flash_platform_data bfin_spi_flash_data = {
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
.type = "sst25wf040",
};
/* SPI flash chip (sst25wf040) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
/* MMC-over-SPI: PIO transfers only. */
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
/* AD7877 resistive touchscreen controller tuning values. */
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
.x_plate_ohms = 419,
.y_plate_ohms = 486,
.pressure_max = 1000,
.pressure_min = 0,
.stopacq_polarity = 1,
.first_conversion_delay = 3,
.acquisition_time = 1,
.averaging = 1,
.pen_down_acc_interval = 1,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
#include <linux/spi/ad7879.h>
/* AD7879 touchscreen controller; aux GPIO exported via gpiolib. */
static const struct ad7879_platform_data bfin_ad7879_ts_info = {
.model = 7879, /* Model = AD7879 */
.x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */
.pressure_max = 10000,
.pressure_min = 0,
.first_conversion_delay = 3, /* wait 512us before do a first conversion */
.acquisition_time = 1, /* 4us acquisition time per sample */
.median = 2, /* do 8 measurements */
.averaging = 1, /* take the average of 4 middle samples */
.pen_down_acc_interval = 255, /* 9.4 ms */
.gpio_export = 1, /* Export GPIO to gpiolib */
.gpio_base = -1, /* Dynamic allocation */
};
#endif
/*
 * SPI slave devices on SPI bus 0.  Each entry is compiled in only when the
 * matching driver is enabled, so overlapping chip selects between
 * mutually-exclusive configurations are tolerated.
 */
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
{
/* the modalias must be the same as spi device driver name */
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
.controller_data = &mmc_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
{
.modalias = "ad7877",
.platform_data = &bfin_ad7877_ts_info,
.irq = IRQ_PF8,
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
{
.modalias = "ad7879",
.platform_data = &bfin_ad7879_ts_info,
.irq = IRQ_PG0,
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
/*
 * Fix: '&&' binds tighter than '||' in #if expressions, so the original
 * "A || B && C" evaluated as "A || (B && C)" and registered the WM8731
 * SPI device even when the codec was built in without SPI control
 * (CONFIG_SND_SOC_WM8731_SPI unset).  Parenthesize to get the intended
 * "(A || B) && C".
 */
#if (defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE)) \
&& defined(CONFIG_SND_SOC_WM8731_SPI)
{
.modalias = "wm8731",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
.mode = SPI_MODE_0,
},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
{
.modalias = "spidev",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
{
.modalias = "bfin-lq035q1-spi",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
};
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI controller data */
/* Bus master with 8 chip selects; DMA-capable.  pin_req lists the
 * peripheral pins to claim from the portmux (zero-terminated). */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = 8,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};
/* SPI (0) */
/* Register window, DMA channel and error IRQ for SPI controller 0. */
static struct resource bfin_spi0_resource[] = {
[0] = {
.start = SPI0_REGBASE,
.end = SPI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI,
.end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_spi0_device = {
.name = "bfin-spi",
.id = 0, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi0_resource),
.resource = bfin_spi0_resource,
.dev = {
.platform_data = &bfin_spi0_info, /* Passed to driver */
},
};
#endif /* spi master and devices */
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
/* UART0: MMIO window, TX/RX/error IRQs and TX/RX DMA channels. */
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_THR,
.end = UART0_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_TX,
.end = IRQ_UART0_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
};
/* Portmux pins claimed by the UART0 driver (zero-terminated list). */
static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
/* UART1: same resource layout as UART0, plus optional CTS/RTS GPIOs. */
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_THR,
.end = UART1_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_TX,
.end = IRQ_UART1_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART1_CTSRTS
{ /* CTS pin */
.start = GPIO_PG0,
.end = GPIO_PG0,
.flags = IORESOURCE_IO,
},
{ /* RTS pin */
.start = GPIO_PF10,
.end = GPIO_PF10,
.flags = IORESOURCE_IO,
},
#endif
};
static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
/* IrDA SIR over UART0.  IRQ/DMA resources span two consecutive lines
 * (presumably RX plus the adjacent TX line -- confirm against the
 * mach-bf527 IRQ/DMA maps). */
static struct resource bfin_sir0_resources[] = {
{
.start = 0xFFC00400,
.end = 0xFFC004FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir0_device = {
.name = "bfin_sir",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
/* IrDA SIR over UART1; mirrors the SIR0 layout. */
static struct resource bfin_sir1_resources[] = {
{
.start = 0xFFC02000,
.end = 0xFFC020FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir1_device = {
.name = "bfin_sir",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
/* On-chip two-wire (I2C) interface controller. */
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI,
.end = IRQ_TWI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
};
#endif
/* I2C slave devices registered on bus 0 at init time. */
static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
{
I2C_BOARD_INFO("pcf8574_lcd", 0x22),
},
#endif
#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE)
{
I2C_BOARD_INFO("pcf8574_keypad", 0x27),
.irq = IRQ_PF8,
},
#endif
};
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
/* SPORT0 used as an extra UART (emulated over the serial port block). */
static struct resource bfin_sport0_uart_resources[] = {
{
.start = SPORT0_TCR1,
.end = SPORT0_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT0_RX,
.end = IRQ_SPORT0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
/* Portmux pins for SPORT0 (zero-terminated). */
static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
.name = "bfin-sport-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
.resource = bfin_sport0_uart_resources,
.dev = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
/* SPORT1 as UART; mirrors SPORT0. */
static struct resource bfin_sport1_uart_resources[] = {
{
.start = SPORT1_TCR1,
.end = SPORT1_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT1_RX,
.end = IRQ_SPORT1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
.name = "bfin-sport-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
.resource = bfin_sport1_uart_resources,
.dev = {
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/input.h>
#include <linux/gpio_keys.h>
/* Two push buttons wired to GPIOs PG0/PG13, active high (3rd field). */
static struct gpio_keys_button bfin_gpio_keys_table[] = {
{BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"},
{BTN_1, GPIO_PG13, 1, "gpio-keys: BTN1"},
};
static struct gpio_keys_platform_data bfin_gpio_keys_data = {
.buttons = bfin_gpio_keys_table,
.nbuttons = ARRAY_SIZE(bfin_gpio_keys_table),
};
static struct platform_device bfin_device_gpiokeys = {
.name = "gpio-keys",
.dev = {
.platform_data = &bfin_gpio_keys_data,
},
};
#endif
static const unsigned int cclk_vlev_datasheet[] =
{
VRPAIR(VLEV_100, 400000000),
VRPAIR(VLEV_105, 426000000),
VRPAIR(VLEV_110, 500000000),
VRPAIR(VLEV_115, 533000000),
VRPAIR(VLEV_120, 600000000),
};
static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
.tuple_tab = cclk_vlev_datasheet,
.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
.vr_settling_time = 25 /* us */,
};
static struct platform_device bfin_dpmc = {
.name = "bfin dpmc",
.dev = {
.platform_data = &bfin_dmpc_vreg_data,
},
};
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
#include <asm/bfin-lq035q1.h>
/* Sharp LQ035Q1 LCD over the PPI in RGB565 mode; backlight on GPIO PG12. */
static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
.mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB,
.ppi_mode = USE_RGB565_16_BIT_PPI,
.use_bl = 1,
.gpio_bl = GPIO_PG12,
};
static struct resource bfin_lq035q1_resources[] = {
{
.start = IRQ_PPI_ERROR,
.end = IRQ_PPI_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_lq035q1_device = {
.name = "bfin-lq035q1",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_lq035q1_resources),
.resource = bfin_lq035q1_resources,
.dev = {
.platform_data = &bfin_lq035q1_data,
},
};
#endif
/* All platform devices registered by ezbrd_init(); membership follows the
 * kernel configuration. */
static struct platform_device *stamp_devices[] __initdata = {
&bfin_dpmc,
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
&bf5xx_nand_device,
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
&rtc_device,
#endif
#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
&musb_device,
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
&bfin_mii_bus,
&bfin_mac_device,
#endif
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
&bfin_lq035q1_device,
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
&bfin_sir1_device,
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
&i2c_bfin_twi_device,
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
&bfin_device_gpiokeys,
#endif
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
&ezbrd_flash_device,
#endif
};
/*
 * Board init: register I2C board info, all platform devices, then the SPI
 * device table.  Runs at arch_initcall time (before regular initcalls).
 *
 * Fix: the original discarded the return values of platform_add_devices()
 * and spi_register_board_info() and always reported success; propagate
 * failures so the initcall machinery can log them.
 */
static int __init ezbrd_init(void)
{
	int ret;

	printk(KERN_INFO "%s(): registering device resources\n", __func__);
	i2c_register_board_info(0, bfin_i2c_board_info,
				ARRAY_SIZE(bfin_i2c_board_info));
	ret = platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
	if (ret)
		return ret;
	return spi_register_board_info(bfin_spi_board_info,
				ARRAY_SIZE(bfin_spi_board_info));
}
arch_initcall(ezbrd_init);
/* Console-capable devices that must exist before the normal device model
 * comes up (early serial / early printk). */
static struct platform_device *ezbrd_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
};
/* Arch hook: register the early console devices listed above. */
void __init native_machine_early_platform_add_devices(void)
{
printk(KERN_INFO "register early platform devices\n");
early_platform_add_devices(ezbrd_early_devices,
ARRAY_SIZE(ezbrd_early_devices));
}
/* Restart hook: when SYSCR indicates the part booted from SPI (low three
 * bits == 0x3 -- confirm against the BF526 HRM), reset the boot SPI chip
 * select first to avoid a reboot hang. */
void native_machine_restart(char *cmd)
{
/* workaround reboot hang when booting from SPI */
if ((bfin_read_SYSCR() & 0x7) == 0x3)
bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS);
}
/* Read the board MAC address out of on-chip OTP memory into addr[0..5].
 * The OTP read routine is called through a fixed address (0xEF00001A),
 * presumably the boot-ROM OTP entry point for this part -- confirm
 * against the BF52x boot ROM documentation.  The MAC bytes are stored in
 * reverse order in the OTP page, hence the [5 - ret] copy.
 * NOTE(review): on OTP read failure (ret bit 0 set) addr is left
 * untouched, yet the function still returns 0 (success). */
int bfin_get_ether_addr(char *addr)
{
/* the MAC is stored in OTP memory page 0xDF */
u32 ret;
u64 otp_mac;
u32 (*otp_read)(u32 page, u32 flags, u64 *page_content) = (void *)0xEF00001A;
ret = otp_read(0xDF, 0x00, &otp_mac);
if (!(ret & 0x1)) {
char *otp_mac_p = (char *)&otp_mac;
/* OTP stores the address byte-reversed; copy it back to front. */
for (ret = 0; ret < 6; ++ret)
addr[ret] = otp_mac_p[5 - ret];
}
return 0;
}
EXPORT_SYMBOL(bfin_get_ether_addr);
| gpl-2.0 |
greg17477/kernel_motley_mako | arch/blackfin/mach-bf537/boards/cm_bf537e.c | 4419 | 18617 | /*
* Copyright 2004-2009 Analog Devices Inc.
* 2008-2009 Bluetechnix
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
/*
* Name the Board for the /proc/cpuinfo
*/
const char bfin_board_name[] = "Bluetechnix CM BF537E";
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
/* Serial boot flash (m25p64): bootloader / kernel / rootfs layout. */
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
.name = "bootloader(spi)",
.size = 0x00020000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
.name = "linux kernel(spi)",
.size = 0xe0000,
.offset = 0x20000
}, {
.name = "file system(spi)",
.size = 0x700000,
.offset = 0x00100000,
}
};
static struct flash_platform_data bfin_spi_flash_data = {
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
.type = "m25p64",
};
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
};
#endif
/* SPI slave devices on bus 0.
 * NOTE(review): mmc_spi and m25p80 both claim bus 0 / chip select 1 here;
 * if both drivers are enabled at once the second registration will clash
 * -- verify the intended chip select for mmc_spi on this board. */
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
{
/* the modalias must be the same as spi device driver name */
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
.controller_data = &mmc_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
};
/* SPI (0) */
/* SPI controller 0: register window, DMA channel, error IRQ. */
static struct resource bfin_spi0_resource[] = {
[0] = {
.start = SPI0_REGBASE,
.end = SPI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI,
.end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = 8,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};
static struct platform_device bfin_spi0_device = {
.name = "bfin-spi",
.id = 0, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi0_resource),
.resource = bfin_spi0_resource,
.dev = {
.platform_data = &bfin_spi0_info, /* Passed to driver */
},
};
#endif /* spi master and devices */
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
/* On-chip real-time clock; id -1 = single instance. */
static struct platform_device rtc_device = {
.name = "rtc-bfin",
.id = -1,
};
#endif
#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE)
/* Hitachi TX09 LCD panel (no resources; driver is self-contained). */
static struct platform_device hitachi_fb_device = {
.name = "hitachi-tx09",
};
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
#include <linux/smc91x.h>
/* SMC91x Ethernet on the async bus, 16-bit access, IRQ on PF14. */
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
.leda = RPC_LED_100_10,
.ledb = RPC_LED_TX_RX,
};
static struct resource smc91x_resources[] = {
{
.start = 0x20200300,
.end = 0x20200300 + 16,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF14,
.end = IRQ_PF14,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
.dev = {
.platform_data = &smc91x_info,
},
};
#endif
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
/* ISP1362 USB host controller: data and command register windows plus a
 * low-edge-triggered interrupt on PG15. */
static struct resource isp1362_hcd_resources[] = {
{
.start = 0x20308000,
.end = 0x20308000,
.flags = IORESOURCE_MEM,
}, {
.start = 0x20308004,
.end = 0x20308004,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PG15,
.end = IRQ_PG15,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
},
};
static struct isp1362_platform_data isp1362_priv = {
.sel15Kres = 1,
.clknotstop = 0,
.oc_enable = 0,
.int_act_high = 0,
.int_edge_triggered = 0,
.remote_wakeup_connected = 0,
.no_power_switching = 1,
.power_switching_mode = 0,
};
static struct platform_device isp1362_hcd_device = {
.name = "isp1362-hcd",
.id = 0,
.dev = {
.platform_data = &isp1362_priv,
},
.num_resources = ARRAY_SIZE(isp1362_hcd_resources),
.resource = isp1362_hcd_resources,
};
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
/* NET2272 USB device controller on the async bus, IRQ on PG13. */
static struct resource net2272_bfin_resources[] = {
{
.start = 0x20300000,
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PG13,
.end = IRQ_PG13,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device net2272_bfin_device = {
.name = "net2272",
.id = -1,
.num_resources = ARRAY_SIZE(net2272_bfin_resources),
.resource = net2272_bfin_resources,
};
#endif
#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
/* NOR flash whose upper address line(s) are driven by GPIOs
 * (gpio-addr-flash driver). */
static struct mtd_partition cm_partitions[] = {
{
.name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
.name = "linux kernel(nor)",
.size = 0x100000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "file system(nor)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
static struct physmap_flash_data cm_flash_data = {
.width = 2,
.parts = cm_partitions,
.nr_parts = ARRAY_SIZE(cm_partitions),
};
/* GPIOs used as extra flash address lines. */
static unsigned cm_flash_gpios[] = { GPIO_PF4 };
static struct resource cm_flash_resource[] = {
{
.name = "cfi_probe",
.start = 0x20000000,
.end = 0x201fffff,
.flags = IORESOURCE_MEM,
}, {
/* Second resource smuggles the GPIO list to the driver:
 * start = address of the array, end = element count. */
.start = (unsigned long)cm_flash_gpios,
.end = ARRAY_SIZE(cm_flash_gpios),
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device cm_flash_device = {
.name = "gpio-addr-flash",
.id = 0,
.dev = {
.platform_data = &cm_flash_data,
},
.num_resources = ARRAY_SIZE(cm_flash_resource),
.resource = cm_flash_resource,
};
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
/* UART0: MMIO, TX/RX/error IRQs, TX/RX DMA channels; optional hardware
 * flow control GPIOs (start/end of -1 are unset placeholders). */
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_THR,
.end = UART0_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_TX,
.end = IRQ_UART0_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART0_CTSRTS
{
/*
* Refer to arch/blackfin/mach-xxx/include/mach/gpio.h for the GPIO map.
*/
.start = -1,
.end = -1,
.flags = IORESOURCE_IO,
},
{
/*
* Refer to arch/blackfin/mach-xxx/include/mach/gpio.h for the GPIO map.
*/
.start = -1,
.end = -1,
.flags = IORESOURCE_IO,
},
#endif
};
/* Portmux pins claimed by the UART0 driver (zero-terminated). */
static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
/* UART1: same layout as UART0. */
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_THR,
.end = UART1_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_TX,
.end = IRQ_UART1_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART1_CTSRTS
{
/*
* Refer to arch/blackfin/mach-xxx/include/mach/gpio.h for the GPIO map.
*/
.start = -1,
.end = -1,
.flags = IORESOURCE_IO,
},
{
/*
* Refer to arch/blackfin/mach-xxx/include/mach/gpio.h for the GPIO map.
*/
.start = -1,
.end = -1,
.flags = IORESOURCE_IO,
},
#endif
};
static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
/* IrDA SIR over UART0; IRQ/DMA resources span two consecutive lines
 * (presumably RX plus the adjacent TX line -- confirm against the
 * mach-bf537 IRQ/DMA maps). */
static struct resource bfin_sir0_resources[] = {
{
.start = 0xFFC00400,
.end = 0xFFC004FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir0_device = {
.name = "bfin_sir",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
/* IrDA SIR over UART1; mirrors SIR0. */
static struct resource bfin_sir1_resources[] = {
{
.start = 0xFFC02000,
.end = 0xFFC020FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir1_device = {
.name = "bfin_sir",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
/* On-chip two-wire (I2C) interface controller. */
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI,
.end = IRQ_TWI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
};
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
/* SPORT0 used as an extra UART. */
static struct resource bfin_sport0_uart_resources[] = {
{
.start = SPORT0_TCR1,
.end = SPORT0_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT0_RX,
.end = IRQ_SPORT0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
/* Portmux pins for SPORT0 (zero-terminated). */
static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
.name = "bfin-sport-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
.resource = bfin_sport0_uart_resources,
.dev = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
/* SPORT1 as UART; mirrors SPORT0. */
static struct resource bfin_sport1_uart_resources[] = {
{
.start = SPORT1_TCR1,
.end = SPORT1_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT1_RX,
.end = IRQ_SPORT1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
.name = "bfin-sport-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
.resource = bfin_sport1_uart_resources,
.dev = {
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#include <linux/bfin_mac.h>
/* On-chip Ethernet MAC wired in full MII mode, single PHY at address 1. */
static const unsigned short bfin_mac_peripherals[] = P_MII0;
static struct bfin_phydev_platform_data bfin_phydev_data[] = {
{
.addr = 1,
.irq = IRQ_MAC_PHYINT,
},
};
static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
.phydev_number = 1,
.phydev_data = bfin_phydev_data,
.phy_mode = PHY_INTERFACE_MODE_MII,
.mac_peripherals = bfin_mac_peripherals,
};
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
.dev = {
.platform_data = &bfin_mii_bus_data,
}
};
/* NOTE(review): platform_data points at the MII bus platform_device
 * itself -- this is how the bfin_mac driver locates its MII bus. */
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
.dev = {
.platform_data = &bfin_mii_bus,
}
};
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
#define PATA_INT IRQ_PF14
/* Memory-mapped PATA interface: command block, control block, IRQ.
 * Registers are spaced 4 bytes apart (ioport_shift = 2). */
static struct pata_platform_info bfin_pata_platform_data = {
.ioport_shift = 2,
.irq_type = IRQF_TRIGGER_HIGH,
};
static struct resource bfin_pata_resources[] = {
{
.start = 0x2030C000,
.end = 0x2030C01F,
.flags = IORESOURCE_MEM,
},
{
.start = 0x2030D018,
.end = 0x2030D01B,
.flags = IORESOURCE_MEM,
},
{
.start = PATA_INT,
.end = PATA_INT,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_pata_device = {
.name = "pata_platform",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_pata_resources),
.resource = bfin_pata_resources,
.dev = {
.platform_data = &bfin_pata_platform_data,
}
};
#endif
/* Core-clock frequency ceiling for each internal voltage level, per
 * the processor data sheet; consumed by the DPMC driver. */
static const unsigned int cclk_vlev_datasheet[] =
{
	VRPAIR(VLEV_085, 250000000),
	VRPAIR(VLEV_090, 376000000),
	VRPAIR(VLEV_095, 426000000),
	VRPAIR(VLEV_100, 426000000),
	VRPAIR(VLEV_105, 476000000),
	VRPAIR(VLEV_110, 476000000),
	VRPAIR(VLEV_115, 476000000),
	VRPAIR(VLEV_120, 500000000),
	VRPAIR(VLEV_125, 533000000),
	VRPAIR(VLEV_130, 600000000),
};

static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
	.tuple_tab = cclk_vlev_datasheet,
	.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
	.vr_settling_time = 25 /* us */,
};

static struct platform_device bfin_dpmc = {
	.name = "bfin dpmc",
	.dev = {
		.platform_data = &bfin_dmpc_vreg_data,
	},
};
/* Devices registered at arch_initcall time; each entry is compiled in
 * only when the corresponding driver is configured. */
static struct platform_device *cm_bf537e_devices[] __initdata = {

	&bfin_dpmc,

#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE)
	&hitachi_fb_device,
#endif

#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
	&rtc_device,
#endif

#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
	&bfin_uart1_device,
#endif
#endif

#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
	&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
	&bfin_sir1_device,
#endif
#endif

#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
	&i2c_bfin_twi_device,
#endif

#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#endif

#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
	&isp1362_hcd_device,
#endif

#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
	&smc91x_device,
#endif

#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
	&bfin_mii_bus,
	&bfin_mac_device,
#endif

#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
	&net2272_bfin_device,
#endif

#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
	&bfin_spi0_device,
#endif

#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
	&bfin_pata_device,
#endif

#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
	&cm_flash_device,
#endif
};
/*
 * Pulse the NET2272 USB controller's reset line (PG14) so the chip is
 * out of reset before its driver binds.  Returns 0 on success or the
 * gpio_request() error; compiles to a plain "return 0" when the
 * NET2272 driver is not enabled.
 */
static int __init net2272_init(void)
{
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
	int ret;

	ret = gpio_request(GPIO_PG14, "net2272");
	if (ret)
		return ret;

	/* Reset USB Chip, PG14 */
	gpio_direction_output(GPIO_PG14, 0);
	mdelay(2);
	gpio_set_value(GPIO_PG14, 1);
#endif

	return 0;
}
/*
 * Board init: register the platform devices and SPI board info,
 * configure the PATA interrupt, then bring the NET2272 out of reset.
 */
static int __init cm_bf537e_init(void)
{
	printk(KERN_INFO "%s(): registering device resources\n", __func__);
	platform_add_devices(cm_bf537e_devices, ARRAY_SIZE(cm_bf537e_devices));
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
	/* keep the PATA IRQ disabled until the driver enables it */
	irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
#endif
	if (net2272_init())
		pr_warning("unable to configure net2272; it probably won't work\n");
	return 0;
}

arch_initcall(cm_bf537e_init);
/* Console-capable UART/SPORT devices that must be available before
 * normal platform device registration (early printk / early console). */
static struct platform_device *cm_bf537e_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
	&bfin_uart1_device,
#endif
#endif

#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#endif
};
/* Hook called by the Blackfin early-boot code to register the early
 * console devices listed above. */
void __init native_machine_early_platform_add_devices(void)
{
	printk(KERN_INFO "register early platform devices\n");
	early_platform_add_devices(cm_bf537e_early_devices,
		ARRAY_SIZE(cm_bf537e_early_devices));
}
/*
 * This board has no dedicated MAC-address store.  NOTE(review):
 * returning non-zero appears to tell the bfin_mac driver that no
 * address was provided — confirm against the driver before relying
 * on it.
 */
int bfin_get_ether_addr(char *addr)
{
	return 1;
}
EXPORT_SYMBOL(bfin_get_ether_addr);
| gpl-2.0 |
eugene373/Nexus_S_ICS | arch/arm/mach-bcmring/timer.c | 4419 | 2102 | /*****************************************************************************
* Copyright 2003 - 2008 Broadcom Corporation. All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/module.h>
#include <csp/tmrHw.h>
#include <mach/timer.h>
/* The core.c file initializes timers 1 and 3 as a linux clocksource. */
/* The real time clock should probably be the real linux clocksource. */
/* In the meantime, this file should agree with core.c as to the */
/* profiling timer. If the clocksource is moved to rtc later, then */
/* we can init the profiling timer here instead. */
/* Timer 1 provides 25MHz resolution synchronized to scheduling and APM timing */
/* Timer 3 provides bus frequency synchronized to ACLK, but spread spectrum will */
/* affect synchronization with scheduling and APM timing. */
#define PROF_TIMER 1

/* Tick rate (counts per second) of the profiling timer. */
timer_tick_rate_t timer_get_tick_rate(void)
{
	return tmrHw_getCountRate(PROF_TIMER);
}
/* Current raw tick count of the profiling timer. */
timer_tick_count_t timer_get_tick_count(void)
{
	return tmrHw_GetCurrentCount(PROF_TIMER); /* change downcounter to upcounter */
}
/*
 * Convert a tick count to milliseconds.
 *
 * The ticks-per-millisecond divisor is computed once on first use.
 * If the timer runs slower than 1 kHz the divisor would have been
 * zero, so guard against that instead of faulting on every call.
 */
timer_msec_t timer_ticks_to_msec(timer_tick_count_t ticks)
{
	static int tick_rate_msec;

	if (tick_rate_msec == 0)
		tick_rate_msec = timer_get_tick_rate() / 1000;

	/* tick rate below 1 kHz: cannot express ticks in whole msec */
	if (tick_rate_msec == 0)
		return 0;

	return ticks / tick_rate_msec;
}
/* Milliseconds elapsed on the profiling timer. */
timer_msec_t timer_get_msec(void)
{
	return timer_ticks_to_msec(timer_get_tick_count());
}

EXPORT_SYMBOL(timer_get_tick_count);
EXPORT_SYMBOL(timer_ticks_to_msec);
EXPORT_SYMBOL(timer_get_tick_rate);
EXPORT_SYMBOL(timer_get_msec);
| gpl-2.0 |
marcio199226/ebreo_kernel_d802_msm8974 | arch/powerpc/kvm/book3s_pr_papr.c | 4675 | 4066 | /*
* Copyright (C) 2011. Freescale Inc. All rights reserved.
*
* Authors:
* Alexander Graf <agraf@suse.de>
* Paul Mackerras <paulus@samba.org>
*
* Description:
*
* Hypercall handling for running PAPR guests in PR KVM on Book 3S
* processors.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*/
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
/*
 * Translate a guest HPTE index into the guest-physical address of its
 * PTE group, using the hash table base and size encoded in SDR1.
 */
static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	unsigned long pteg_addr;

	pte_index <<= 4;
	/*
	 * The HTABSIZE field (sdr1 & 0x1f) can push the shift count past
	 * 31, so the mask must be built from an unsigned long constant;
	 * a plain "1 <<" would overflow a 32-bit int (undefined
	 * behaviour) for large, guest-supplied sizes.
	 */
	pte_index &= ((1UL << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70;
	pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg_addr |= pte_index;

	return pteg_addr;
}
static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
long flags = kvmppc_get_gpr(vcpu, 4);
long pte_index = kvmppc_get_gpr(vcpu, 5);
unsigned long pteg[2 * 8];
unsigned long pteg_addr, i, *hpte;
pte_index &= ~7UL;
pteg_addr = get_pteg_addr(vcpu, pte_index);
copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
hpte = pteg;
if (likely((flags & H_EXACT) == 0)) {
pte_index &= ~7UL;
for (i = 0; ; ++i) {
if (i == 8)
return H_PTEG_FULL;
if ((*hpte & HPTE_V_VALID) == 0)
break;
hpte += 2;
}
} else {
i = kvmppc_get_gpr(vcpu, 5) & 7UL;
hpte += i * 2;
}
hpte[0] = kvmppc_get_gpr(vcpu, 6);
hpte[1] = kvmppc_get_gpr(vcpu, 7);
copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg));
kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
kvmppc_set_gpr(vcpu, 4, pte_index | i);
return EMULATE_DONE;
}
static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
{
unsigned long flags= kvmppc_get_gpr(vcpu, 4);
unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
unsigned long v = 0, pteg, rb;
unsigned long pte[2];
pteg = get_pteg_addr(vcpu, pte_index);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
if ((pte[0] & HPTE_V_VALID) == 0 ||
((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) {
kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
return EMULATE_DONE;
}
copy_to_user((void __user *)pteg, &v, sizeof(v));
rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
kvmppc_set_gpr(vcpu, 4, pte[0]);
kvmppc_set_gpr(vcpu, 5, pte[1]);
return EMULATE_DONE;
}
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{
unsigned long flags = kvmppc_get_gpr(vcpu, 4);
unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
unsigned long rb, pteg, r, v;
unsigned long pte[2];
pteg = get_pteg_addr(vcpu, pte_index);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
if ((pte[0] & HPTE_V_VALID) == 0 ||
((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) {
kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
return EMULATE_DONE;
}
v = pte[0];
r = pte[1];
r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI |
HPTE_R_KEY_LO);
r |= (flags << 55) & HPTE_R_PP0;
r |= (flags << 48) & HPTE_R_KEY_HI;
r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
pte[1] = r;
rb = compute_tlbie_rb(v, r, pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
copy_to_user((void __user *)pteg, pte, sizeof(pte));
kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
return EMULATE_DONE;
}
/*
 * Dispatch a PAPR hypercall from a PR guest.  Returns EMULATE_DONE
 * when the call was fully handled here, EMULATE_FAIL to defer the
 * call to user space.
 */
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
	switch (cmd) {
	case H_ENTER:
		return kvmppc_h_pr_enter(vcpu);
	case H_REMOVE:
		return kvmppc_h_pr_remove(vcpu);
	case H_PROTECT:
		return kvmppc_h_pr_protect(vcpu);
	case H_BULK_REMOVE:
		/* We just flush all PTEs, so user space can
		   handle the HPT modifications */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		break;
	case H_CEDE:
		kvm_vcpu_block(vcpu);
		vcpu->stat.halt_wakeup++;
		return EMULATE_DONE;
	}

	/* H_BULK_REMOVE deliberately falls through to here: after the
	 * flush, the hypercall itself is still left to user space. */
	return EMULATE_FAIL;
}
| gpl-2.0 |
percy-g2/android_kernel_motorola_msm8610 | tools/perf/builtin-kmem.c | 4931 | 17652 | #include "builtin.h"
#include "perf.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/debug.h"
#include <linux/rbtree.h>
struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static const char *input_name;

/* --alloc/--caller selection: the flag holding the larger value is
 * the one given last, and subsequent --sort/--line apply to it. */
static int alloc_flag;
static int caller_flag;

/* Row limits for the two result tables; -1 prints everything. */
static int alloc_lines = -1;
static int caller_lines = -1;
static bool raw_ip;

static char default_sort_order[] = "frag,hit,bytes";

/* cpu number -> NUMA node, built from sysfs; -1 = unknown. */
static int *cpunode_map;
static int max_cpu_num;

/* One accounting record, keyed by allocation pointer in the alloc
 * tree and by call site in the caller tree. */
struct alloc_stat {
	u64 call_site;
	u64 ptr;
	u64 bytes_req;		/* total bytes callers asked for */
	u64 bytes_alloc;	/* total bytes the allocator returned */
	u32 hit;		/* number of allocations accounted */
	u32 pingpong;		/* frees on a cpu other than the allocator's */

	short alloc_cpu;

	struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;

#define PATH_SYS_NODE "/sys/devices/system/node"
/*
 * Size cpunode_map[] from the kernel's kernel_max sysfs value
 * (falling back to 4096 cpus when it is unreadable) and mark every
 * entry "node unknown" (-1).
 *
 * The old code returned early on the fallback path without ever
 * allocating cpunode_map, so setup_cpunode_map() would then write
 * through a NULL pointer if the node directories existed.
 */
static void init_cpunode_map(void)
{
	FILE *fp;
	int i;

	fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
	if (!fp) {
		max_cpu_num = 4096;
	} else {
		if (fscanf(fp, "%d", &max_cpu_num) < 1)
			die("Failed to read 'kernel_max' from sysfs");
		/* kernel_max is the highest valid cpu number, so +1 slots */
		max_cpu_num++;
		fclose(fp);
	}

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map)
		die("calloc");
	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;
}
/*
 * Walk /sys/devices/system/node/node* /cpu* and record each cpu's
 * NUMA node in cpunode_map[].  Entries stay at -1 when the sysfs
 * node hierarchy is missing or unreadable.
 */
static void setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];

	init_cpunode_map();

	dir1 = opendir(PATH_SYS_NODE);
	if (!dir1)
		return;

	while ((dent1 = readdir(dir1)) != NULL) {
		/* only node<N> directories are of interest */
		if (dent1->d_type != DT_DIR ||
		    sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			/* cpu entries are symlinks named cpu<N> */
			if (dent2->d_type != DT_LNK ||
			    sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
}
/*
 * Account one allocation under its pointer in root_alloc_stat,
 * creating the rbtree node on first sight of @ptr.  call_site and
 * alloc_cpu always track the most recent allocation of the pointer.
 */
static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			      int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		/* existing entry: accumulate */
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
}
/*
 * Account one allocation under its call site in root_caller_stat,
 * creating the rbtree node on first sight of @call_site.
 */
static void insert_caller_stat(unsigned long call_site,
			       int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		/* existing entry: accumulate */
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}
}
/*
 * Account one kmalloc/kmem_cache_alloc tracepoint record (@node != 0
 * for the _node variants): update the per-pointer and per-callsite
 * stats, the global totals, and the cross-node allocation counter
 * when the allocating cpu's node differs from the requested node.
 */
static void process_alloc_event(void *data,
				struct event *event,
				int cpu,
				u64 timestamp __used,
				struct thread *thread __used,
				int node)
{
	unsigned long call_site;
	unsigned long ptr;
	int bytes_req;
	int bytes_alloc;
	int node1, node2;

	ptr = raw_field_value(event, "ptr", data);
	call_site = raw_field_value(event, "call_site", data);
	bytes_req = raw_field_value(event, "bytes_req", data);
	bytes_alloc = raw_field_value(event, "bytes_alloc", data);

	insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
	insert_caller_stat(call_site, bytes_req, bytes_alloc);

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	if (node) {
		node1 = cpunode_map[cpu];
		node2 = raw_field_value(event, "node", data);
		if (node1 != node2)
			nr_cross_allocs++;
	}
	nr_allocs++;
}
static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);
/*
 * Look up an alloc_stat record keyed by (@ptr, @call_site), walking
 * @root with the ordering imposed by @sort_fn.  Returns NULL when no
 * matching entry exists.
 */
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };
	struct rb_node *node = root->rb_node;

	while (node) {
		struct alloc_stat *entry;
		int order;

		entry = rb_entry(node, struct alloc_stat, node);
		order = sort_fn(&key, entry);

		if (order == 0)
			return entry;

		node = order < 0 ? node->rb_left : node->rb_right;
	}

	return NULL;
}
/*
 * Account a kfree/kmem_cache_free record: when the freeing cpu is not
 * the one that allocated the pointer, bump the ping-pong counters of
 * both the pointer's entry and its call site's entry.  Frees of
 * pointers we never saw allocated are silently ignored.
 */
static void process_free_event(void *data,
			       struct event *event,
			       int cpu,
			       u64 timestamp __used,
			       struct thread *thread __used)
{
	unsigned long ptr;
	struct alloc_stat *s_alloc, *s_caller;

	ptr = raw_field_value(event, "ptr", data);

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return;

	if (cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		/* every alloc entry has a matching caller entry */
		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		assert(s_caller);
		s_caller->pingpong++;
	}
	/* mark the pointer as freed */
	s_alloc->alloc_cpu = -1;
}
/*
 * Decode one raw tracepoint record and route it to the alloc or free
 * handler based on the event name.  Records of unrelated types are
 * ignored.
 */
static void process_raw_event(union perf_event *raw_event __used, void *data,
			      int cpu, u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);
	/*
	 * An unknown event type (e.g. a perf.data file that contains
	 * more than the kmem tracepoints) would otherwise make the
	 * strcmp() calls below dereference NULL.
	 */
	if (event == NULL)
		return;

	if (!strcmp(event->name, "kmalloc") ||
	    !strcmp(event->name, "kmem_cache_alloc")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 0);
		return;
	}

	if (!strcmp(event->name, "kmalloc_node") ||
	    !strcmp(event->name, "kmem_cache_alloc_node")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 1);
		return;
	}

	if (!strcmp(event->name, "kfree") ||
	    !strcmp(event->name, "kmem_cache_free")) {
		process_free_event(data, event, cpu, timestamp, thread);
		return;
	}
}
/*
 * Per-sample callback: resolve the owning thread, then feed the raw
 * tracepoint payload to process_raw_event().  Returns -1 only when
 * the thread cannot be resolved.
 */
static int process_sample_event(struct perf_tool *tool __used,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel __used,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	process_raw_event(event, sample->raw_data, sample->cpu,
			  sample->time, thread);

	return 0;
}

/* Session callbacks; ordered_samples delivers events in time order. */
static struct perf_tool perf_kmem = {
	.sample = process_sample_event,
	.comm = perf_event__process_comm,
	.ordered_samples = true,
};
/*
 * Percentage of allocated bytes wasted relative to what was requested
 * (0.0 when nothing was allocated, avoiding a division by zero).
 */
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	return n_alloc ? 100.0 - (100.0 * n_req / n_alloc) : 0.0;
}
/*
 * Print up to @n_lines rows of a sorted stat tree.  @is_caller selects
 * the call-site table (with kernel symbol resolution unless --raw-ip)
 * over the raw allocation-pointer table.
 */
static void __print_result(struct rb_root *root, struct perf_session *session,
			   int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	machine = perf_session__find_host_machine(session);
	if (!machine) {
		pr_err("__print_result: couldn't find kernel information\n");
		return;
	}
	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine, addr, &map, NULL);
		} else
			addr = data->ptr;

		/* symbol+offset when resolved, raw address otherwise */
		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	/* n_lines only reaches -1 here when rows were left unprinted */
	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.102s\n", graph_dotted_line);
}
/* Print the global totals and overall fragmentation percentage. */
static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}

/* Print whichever tables were requested, then the summary. */
static void print_result(struct perf_session *session)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_summary();
}
/* A selectable --sort key: its name and comparison function. */
struct sort_dimension {
	const char name[20];
	sort_fn_t cmp;
	struct list_head list;
};

/* Active sort-key chains for the caller and alloc tables. */
static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);
/*
 * Insert @data into @root ordered by the first key in @sort_list that
 * distinguishes it.  Larger entries go left, i.e. the tree iterates
 * in descending order; ties fall to the right.
 */
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
struct list_head *sort_list)
{
struct rb_node *node;
struct alloc_stat *data;
for (;;) {
node = rb_first(root);
if (!node)
break;
rb_erase(node, root);
data = rb_entry(node, struct alloc_stat, node);
sort_insert(root_sorted, data, sort_list);
}
}
/* Move both accumulation trees into their sorted counterparts. */
static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}
/*
 * "perf kmem stat" backend: open the recorded file, process all
 * events, then sort and print the result tables.  Returns 0 or a
 * negative errno-style code.
 */
static int __cmd_kmem(void)
{
	int err = -EINVAL;
	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
							 0, false, &perf_kmem);
	if (session == NULL)
		return -ENOMEM;

	if (perf_session__create_kernel_maps(session) < 0)
		goto out_delete;

	if (!perf_session__has_traces(session, "kmem record"))
		goto out_delete;

	setup_pager();
	err = perf_session__process_events(session, &perf_kmem);
	if (err != 0)
		goto out_delete;
	sort_result();
	print_result(session);
out_delete:
	perf_session__delete(session);
	return err;
}

static const char * const kmem_usage[] = {
	"perf kmem [<options>] {record|stat}",
	NULL
};
/* Sort key: raw allocation pointer. */
static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	return (l->ptr > r->ptr) - (l->ptr < r->ptr);
}

static struct sort_dimension ptr_sort_dimension = {
	.name = "ptr",
	.cmp = ptr_cmp,
};
/* Sort key: allocation call site. */
static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	return (l->call_site > r->call_site) - (l->call_site < r->call_site);
}

static struct sort_dimension callsite_sort_dimension = {
	.name = "callsite",
	.cmp = callsite_cmp,
};
/* Sort key: number of allocations accounted to the entry. */
static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	return (l->hit > r->hit) - (l->hit < r->hit);
}

static struct sort_dimension hit_sort_dimension = {
	.name = "hit",
	.cmp = hit_cmp,
};
/* Sort key: total bytes actually allocated. */
static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	return (l->bytes_alloc > r->bytes_alloc) -
	       (l->bytes_alloc < r->bytes_alloc);
}

static struct sort_dimension bytes_sort_dimension = {
	.name = "bytes",
	.cmp = bytes_cmp,
};
/* Sort key: internal fragmentation percentage. */
static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double lf = fragmentation(l->bytes_req, l->bytes_alloc);
	double rf = fragmentation(r->bytes_req, r->bytes_alloc);

	return (lf > rf) - (lf < rf);
}

static struct sort_dimension frag_sort_dimension = {
	.name = "frag",
	.cmp = frag_cmp,
};
/* Sort key: cross-cpu alloc/free (ping-pong) count. */
static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	return (l->pingpong > r->pingpong) - (l->pingpong < r->pingpong);
}

static struct sort_dimension pingpong_sort_dimension = {
	.name = "pingpong",
	.cmp = pingpong_cmp,
};
/* All selectable --sort keys. */
static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS	\
	(int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))
/*
 * Append a heap copy of the sort dimension named @tok to @list.
 * Returns 0 on success, -1 when @tok is not a known key.
 */
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			/* copy so the same key can appear on both lists */
			sort = malloc(sizeof(*sort));
			if (!sort)
				die("malloc");
			memcpy(sort, avail_sorts[i], sizeof(*sort));
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}
/*
 * Parse a comma-separated --sort key list into @sort_list.
 * Returns 0 on success, -1 on an unknown key.
 *
 * strsep() advances the pointer it is given, so keep the original
 * strdup() result for freeing and walk with a separate cursor: the
 * old code called free() on the advanced pointer, which leaked the
 * buffer on success (the pointer ends up NULL) and passed a
 * mid-buffer pointer to free() on the error path.
 */
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;

	if (!str)
		die("strdup");

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}
/*
 * --sort callback: the key list applies to whichever of --caller or
 * --alloc was given last.
 */
static int parse_sort_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	struct list_head *list;

	if (!arg)
		return -1;

	list = caller_flag > alloc_flag ? &caller_sort : &alloc_sort;
	return setup_sorting(list, arg);
}
/* --caller/--alloc callbacks: the flag set to the larger value marks
 * which table later --sort/--line options apply to. */
static int parse_caller_opt(const struct option *opt __used,
			    const char *arg __used, int unset __used)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __used,
			   const char *arg __used, int unset __used)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}
/* --line callback: row limit for whichever table was selected last. */
static int parse_line_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}
/* Command-line options for "perf kmem". */
static const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics",
			   parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics",
			   parse_alloc_opt),
	/* key names must match avail_sorts[]: "callsite", not "call_site" */
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num",
		     "show n lines",
		     parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_END()
};
/* Fixed argv prefix for "perf kmem record": system-wide (-a), raw
 * samples (-R), sample every event (-c 1), all kmem tracepoints. */
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};
/*
 * "perf kmem record": build an argv from record_args plus the user's
 * extra arguments (argv[0], the subcommand word, is skipped) and hand
 * it to cmd_record().
 */
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}
/*
 * Entry point for "perf kmem": dispatch to record or stat mode,
 * installing the default sort order for any table the user did not
 * configure explicitly.
 */
int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	symbol__init();

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strcmp(argv[0], "stat")) {
		setup_cpunode_map();

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		return __cmd_kmem();
	} else
		usage_with_options(kmem_usage, kmem_options);

	return 0;
}
| gpl-2.0 |
Renzo-Olivares/android_422_kernel_htc_monarudo | drivers/gpu/drm/savage/savage_drv.c | 5443 | 2710 | /* savage_drv.c -- Savage driver for Linux
*
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h>
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"
#include "drm_pciids.h"
/* PCI IDs of all supported Savage chips (expanded from drm_pciids.h). */
static struct pci_device_id pciidlist[] = {
	savage_PCI_IDS
};

/* Plain DRM file operations; nothing Savage-specific here. */
static const struct file_operations savage_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_mmap,
	.poll = drm_poll,
	.fasync = drm_fasync,
	.llseek = noop_llseek,
};
/* Legacy (pre-KMS) DRM driver using AGP, MTRRs and PCI DMA; the
 * actual hooks live in savage_bci.c/savage_drv.h. */
static struct drm_driver driver = {
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
	.dev_priv_size = sizeof(drm_savage_buf_priv_t),
	.load = savage_driver_load,
	.firstopen = savage_driver_firstopen,
	.lastclose = savage_driver_lastclose,
	.unload = savage_driver_unload,
	.reclaim_buffers = savage_reclaim_buffers,
	.ioctls = savage_ioctls,
	.dma_ioctl = savage_bci_buffers,
	.fops = &savage_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
static struct pci_driver savage_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
};

/* num_ioctls is only known at runtime, so patch it in before
 * registering with the DRM core. */
static int __init savage_init(void)
{
	driver.num_ioctls = savage_max_ioctl;
	return drm_pci_init(&driver, &savage_pci_driver);
}

static void __exit savage_exit(void)
{
	drm_pci_exit(&driver, &savage_pci_driver);
}

module_init(savage_init);
module_exit(savage_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
| gpl-2.0 |
javelinanddart/android_kernel_samsung_msm8974 | drivers/gpu/drm/savage/savage_drv.c | 5443 | 2710 | /* savage_drv.c -- Savage driver for Linux
*
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/module.h>
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"
#include "drm_pciids.h"
/* PCI IDs of all supported Savage chips (expanded from drm_pciids.h). */
static struct pci_device_id pciidlist[] = {
	savage_PCI_IDS
};

/* Plain DRM file operations; nothing Savage-specific here. */
static const struct file_operations savage_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_mmap,
	.poll = drm_poll,
	.fasync = drm_fasync,
	.llseek = noop_llseek,
};
/* Legacy (pre-KMS) DRM driver using AGP, MTRRs and PCI DMA; the
 * actual hooks live in savage_bci.c/savage_drv.h. */
static struct drm_driver driver = {
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
	.dev_priv_size = sizeof(drm_savage_buf_priv_t),
	.load = savage_driver_load,
	.firstopen = savage_driver_firstopen,
	.lastclose = savage_driver_lastclose,
	.unload = savage_driver_unload,
	.reclaim_buffers = savage_reclaim_buffers,
	.ioctls = savage_ioctls,
	.dma_ioctl = savage_bci_buffers,
	.fops = &savage_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
static struct pci_driver savage_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
};
static int __init savage_init(void)
{
driver.num_ioctls = savage_max_ioctl;
return drm_pci_init(&driver, &savage_pci_driver);
}
/* Module exit: unregister from the DRM PCI core, mirroring savage_init(). */
static void __exit savage_exit(void)
{
	drm_pci_exit(&driver, &savage_pci_driver);
}
/* Standard module registration and metadata. */
module_init(savage_init);
module_exit(savage_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
| gpl-2.0 |
TeamRegular/android_kernel_lge_e2nxx | arch/blackfin/mach-bf533/boards/blackstamp.c | 7235 | 12265 | /*
* Board Info File for the BlackStamp
*
* Copyright 2004-2008 Analog Devices Inc.
* 2008 Benjamin Matthews <bmat@lle.rochester.edu>
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* More info about the BlackStamp at:
* http://blackfin.uclinux.org/gf/project/blackstamp/
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/i2c.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
/*
* Name the Board for the /proc/cpuinfo
*/
const char bfin_board_name[] = "BlackStamp";

#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
/* On-chip Blackfin RTC; id -1 means there is only one instance. */
static struct platform_device rtc_device = {
	.name = "rtc-bfin",
	.id = -1,
};
#endif
/*
* Driver needs to know address, irq and flag pin.
*/
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
#include <linux/smc91x.h>

/* SMC91x Ethernet controller wired to the async memory bus at
 * 0x20300300, interrupting on PF3 (high level). */
static struct smc91x_platdata smc91x_info = {
	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
	.leda = RPC_LED_100_10,
	.ledb = RPC_LED_TX_RX,
};

static struct resource smc91x_resources[] = {
	{
		.name = "smc91x-regs",
		.start = 0x20300300,
		.end = 0x20300300 + 16,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_PF3,
		.end = IRQ_PF3,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
	},
};

static struct platform_device smc91x_device = {
	.name = "smc91x",
	.id = 0,
	.num_resources = ARRAY_SIZE(smc91x_resources),
	.resource = smc91x_resources,
	.dev = {
		.platform_data = &smc91x_info,
	},
};
#endif
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
/* MTD layout of the SPI boot flash: a write-protected bootloader
 * partition, a fixed-size kernel partition, and the remainder for the
 * root file system. */
static struct mtd_partition bfin_spi_flash_partitions[] = {
	{
		.name = "bootloader(spi)",
		.size = 0x00040000,
		.offset = 0,
		.mask_flags = MTD_CAP_ROM
	}, {
		.name = "linux kernel(spi)",
		.size = 0x180000,
		.offset = MTDPART_OFS_APPEND,
	}, {
		.name = "file system(spi)",
		.size = MTDPART_SIZ_FULL,
		.offset = MTDPART_OFS_APPEND,
	}
};

static struct flash_platform_data bfin_spi_flash_data = {
	.name = "m25p80",
	.parts = bfin_spi_flash_partitions,
	.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
	.type = "m25p64",
};

/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
	.enable_dma = 0, /* use dma transfer with this chip*/
};
#endif

#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
/* Per-chip SPI settings for the MMC-over-SPI slot (PIO mode). */
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
	.enable_dma = 0,
};
#endif
/* SPI devices hanging off controller 0: boot flash on CS2, MMC on CS5,
 * and a generic spidev node on CS7 for user-space access. */
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
	{
		/* the modalias must be the same as spi device driver name */
		.modalias = "m25p80", /* Name of spi_driver for this device */
		.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
		.bus_num = 0, /* Framework bus number */
		.chip_select = 2, /* Framework chip select. */
		.platform_data = &bfin_spi_flash_data,
		.controller_data = &spi_flash_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
	{
		.modalias = "mmc_spi",
		.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,
		.controller_data = &mmc_spi_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
	{
		.modalias = "spidev",
		.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 7,
	},
#endif
};
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI (0) */
/* Register window, DMA channel and IRQ for the on-chip SPI controller. */
static struct resource bfin_spi0_resource[] = {
	[0] = {
		.start = SPI0_REGBASE,
		.end = SPI0_REGBASE + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = CH_SPI,
		.end = CH_SPI,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = IRQ_SPI,
		.end = IRQ_SPI,
		.flags = IORESOURCE_IRQ,
	}
};

/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
	.num_chipselect = 8,
	.enable_dma = 1, /* master has the ability to do dma transfer */
	.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};

static struct platform_device bfin_spi0_device = {
	.name = "bfin-spi",
	.id = 0, /* Bus number */
	.num_resources = ARRAY_SIZE(bfin_spi0_resource),
	.resource = bfin_spi0_resource,
	.dev = {
		.platform_data = &bfin_spi0_info, /* Passed to driver */
	},
};
#endif /* spi master and devices */
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
/* UART0: register window plus separate TX/RX/error IRQs and the TX/RX
 * DMA channels used by the bfin-uart driver. */
static struct resource bfin_uart0_resources[] = {
	{
		.start = BFIN_UART_THR,
		.end = BFIN_UART_GCTL+2,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART0_TX,
		.end = IRQ_UART0_TX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART0_RX,
		.end = IRQ_UART0_RX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART0_ERROR,
		.end = IRQ_UART0_ERROR,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART0_TX,
		.end = CH_UART0_TX,
		.flags = IORESOURCE_DMA,
	},
	{
		.start = CH_UART0_RX,
		.end = CH_UART0_RX,
		.flags = IORESOURCE_DMA,
	},
};

/* Zero-terminated list of pins to claim via the portmux layer. */
static unsigned short bfin_uart0_peripherals[] = {
	P_UART0_TX, P_UART0_RX, 0
};

static struct platform_device bfin_uart0_device = {
	.name = "bfin-uart",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_uart0_resources),
	.resource = bfin_uart0_resources,
	.dev = {
		.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
	},
};
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
/* IrDA SIR on UART0: same peripheral as the serial port, so the two
 * configurations are mutually exclusive in practice. */
static struct resource bfin_sir0_resources[] = {
	{
		.start = 0xFFC00400,
		.end = 0xFFC004FF,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART0_RX,
		.end = IRQ_UART0_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART0_RX,
		.end = CH_UART0_RX+1,
		.flags = IORESOURCE_DMA,
	},
};

static struct platform_device bfin_sir0_device = {
	.name = "bfin_sir",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
	.resource = bfin_sir0_resources,
};
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
/* SPORT0 used in UART emulation mode. */
static struct resource bfin_sport0_uart_resources[] = {
	{
		.start = SPORT0_TCR1,
		.end = SPORT0_MRCS3+4,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_SPORT0_RX,
		.end = IRQ_SPORT0_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_SPORT0_ERROR,
		.end = IRQ_SPORT0_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

/* Zero-terminated pin list handed to the portmux layer. */
static unsigned short bfin_sport0_peripherals[] = {
	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};

static struct platform_device bfin_sport0_uart_device = {
	.name = "bfin-sport-uart",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
	.resource = bfin_sport0_uart_resources,
	.dev = {
		.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
	},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
/* SPORT1 used in UART emulation mode; mirrors the SPORT0 setup. */
static struct resource bfin_sport1_uart_resources[] = {
	{
		.start = SPORT1_TCR1,
		.end = SPORT1_MRCS3+4,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_SPORT1_RX,
		.end = IRQ_SPORT1_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_SPORT1_ERROR,
		.end = IRQ_SPORT1_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static unsigned short bfin_sport1_peripherals[] = {
	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};

static struct platform_device bfin_sport1_uart_device = {
	.name = "bfin-sport-uart",
	.id = 1,
	.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
	.resource = bfin_sport1_uart_resources,
	.dev = {
		.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
	},
};
#endif
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/input.h>
#include <linux/gpio_keys.h>

/* Three push buttons on PF4..PF6 reported as BTN_0..BTN_2. */
static struct gpio_keys_button bfin_gpio_keys_table[] = {
	{BTN_0, GPIO_PF4, 0, "gpio-keys: BTN0"},
	{BTN_1, GPIO_PF5, 0, "gpio-keys: BTN1"},
	{BTN_2, GPIO_PF6, 0, "gpio-keys: BTN2"},
}; /* Mapped to the first three PF Test Points */

static struct gpio_keys_platform_data bfin_gpio_keys_data = {
	.buttons = bfin_gpio_keys_table,
	.nbuttons = ARRAY_SIZE(bfin_gpio_keys_table),
};

static struct platform_device bfin_device_gpiokeys = {
	.name = "gpio-keys",
	.dev = {
		.platform_data = &bfin_gpio_keys_data,
	},
};
#endif
#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
#include <linux/i2c-gpio.h>

/* Bit-banged I2C on PF8 (SDA) / PF9 (SCL), ~12.5 kHz (udelay 40). */
static struct i2c_gpio_platform_data i2c_gpio_data = {
	.sda_pin = GPIO_PF8,
	.scl_pin = GPIO_PF9,
	.sda_is_open_drain = 0,
	.scl_is_open_drain = 0,
	.udelay = 40,
}; /* This hasn't actually been used these pins
 * are (currently) free pins on the expansion connector */

static struct platform_device i2c_gpio_device = {
	.name = "i2c-gpio",
	.id = 0,
	.dev = {
		.platform_data = &i2c_gpio_data,
	},
};
#endif
/* No fixed on-board I2C devices; empty table kept as a placeholder for
 * devices added on the expansion connector. */
static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
};
/* Core-voltage / maximum-core-clock pairs from the datasheet, consumed
 * by the dynamic power management controller (DPMC) driver. */
static const unsigned int cclk_vlev_datasheet[] =
{
	VRPAIR(VLEV_085, 250000000),
	VRPAIR(VLEV_090, 376000000),
	VRPAIR(VLEV_095, 426000000),
	VRPAIR(VLEV_100, 426000000),
	VRPAIR(VLEV_105, 476000000),
	VRPAIR(VLEV_110, 476000000),
	VRPAIR(VLEV_115, 476000000),
	VRPAIR(VLEV_120, 600000000),
	VRPAIR(VLEV_125, 600000000),
	VRPAIR(VLEV_130, 600000000),
};

static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
	.tuple_tab = cclk_vlev_datasheet,
	.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
	.vr_settling_time = 25 /* us */,
};

static struct platform_device bfin_dpmc = {
	.name = "bfin dpmc",
	.dev = {
		.platform_data = &bfin_dmpc_vreg_data,
	},
};
/* All platform devices registered at boot by blackstamp_init(); each
 * entry is compiled in only when its driver is configured. */
static struct platform_device *stamp_devices[] __initdata = {
	&bfin_dpmc,
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
	&rtc_device,
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
	&smc91x_device,
#endif
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
	&bfin_spi0_device,
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
	&bfin_sir0_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
	&bfin_device_gpiokeys,
#endif
#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
	&i2c_gpio_device,
#endif
};
/*
 * Board init: register I2C devices, platform devices, the Ethernet
 * CPLD routing line, and SPI devices, in that order.
 */
static int __init blackstamp_init(void)
{
	int ret;

	printk(KERN_INFO "%s(): registering device resources\n", __func__);

	/* I2C board info must be registered before the bus driver probes. */
	i2c_register_board_info(0, bfin_i2c_board_info,
				ARRAY_SIZE(bfin_i2c_board_info));

	ret = platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
	if (ret < 0)
		return ret;

#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
	/*
	 * setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC.
	 * the bfin-async-map driver takes care of flipping between
	 * flash and ethernet when necessary.
	 *
	 * NOTE(review): comment mentions BF533_STAMP but this is the
	 * BlackStamp board file -- likely copied from the STAMP board;
	 * confirm the CPLD wiring really matches.
	 */
	ret = gpio_request(GPIO_PF0, "enet_cpld");
	if (!ret) {
		gpio_direction_output(GPIO_PF0, 1);
		gpio_free(GPIO_PF0);
	}
#endif

	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
	return 0;
}
arch_initcall(blackstamp_init);
/* Devices that must exist before the regular initcalls run, i.e. the
 * serial consoles used by early printk. */
static struct platform_device *stamp_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#endif
};

/* Hook called by arch setup to register the early console devices. */
void __init native_machine_early_platform_add_devices(void)
{
	printk(KERN_INFO "register early platform devices\n");
	early_platform_add_devices(stamp_early_devices,
		ARRAY_SIZE(stamp_early_devices));
}
| gpl-2.0 |
DZB-Team/android_kernel_samsung_amazing | net/ipv4/sysfs_net_ipv4.c | 8003 | 2305 | /*
* net/ipv4/sysfs_net_ipv4.c
*
* sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
*
* Copyright (C) 2008 Google, Inc.
*
* Robert Love <rlove@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <net/tcp.h>
/*
 * CREATE_IPV4_FILE - generate show/store handlers and a kobj_attribute
 * for one integer tunable backed by the global variable @_var.
 *
 * The store handler only accepts a plain non-negative decimal integer.
 * kstrtoint() is used instead of sscanf("%d") so that trailing garbage
 * ("5abc", "5 6") and values that overflow int are rejected instead of
 * being silently truncated to their leading digits.
 */
#define CREATE_IPV4_FILE(_name, _var) \
static ssize_t _name##_show(struct kobject *kobj, \
			    struct kobj_attribute *attr, char *buf) \
{ \
	return sprintf(buf, "%d\n", _var); \
} \
static ssize_t _name##_store(struct kobject *kobj, \
			     struct kobj_attribute *attr, \
			     const char *buf, size_t count) \
{ \
	int val, ret; \
	ret = kstrtoint(buf, 10, &val); \
	if (ret) \
		return ret; \
	if (val < 0) \
		return -EINVAL; \
	_var = val; \
	return count; \
} \
static struct kobj_attribute _name##_attr = \
	__ATTR(_name, 0644, _name##_show, _name##_store)
/* One sysfs file per TCP send/receive buffer knob, each aliasing one
 * slot of the corresponding sysctl array (min/default/max). */
CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);
CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
/* NULL-terminated attribute list registered as one sysfs group. */
static struct attribute *ipv4_attrs[] = {
	&tcp_wmem_min_attr.attr,
	&tcp_wmem_def_attr.attr,
	&tcp_wmem_max_attr.attr,
	&tcp_rmem_min_attr.attr,
	&tcp_rmem_def_attr.attr,
	&tcp_rmem_max_attr.attr,
	NULL
};

static struct attribute_group ipv4_attr_group = {
	.attrs = ipv4_attrs,
};
/*
 * Create /sys/kernel/ipv4 and populate it with the TCP buffer knobs.
 * On group-creation failure the kobject reference is dropped so the
 * directory is removed again.
 */
static __init int sysfs_ipv4_init(void)
{
	struct kobject *kobj;
	int err;

	kobj = kobject_create_and_add("ipv4", kernel_kobj);
	if (!kobj)
		return -ENOMEM;

	err = sysfs_create_group(kobj, &ipv4_attr_group);
	if (err)
		kobject_put(kobj);

	return err;
}
subsys_initcall(sysfs_ipv4_init);
| gpl-2.0 |
meizuosc/m35x | drivers/infiniband/hw/ipath/ipath_mad.c | 8515 | 42537 | /*
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"
/* SMP status codes (big-endian on the wire) returned in smp->status. */
#define IB_SMP_UNSUP_VERSION	cpu_to_be16(0x0004)
#define IB_SMP_UNSUP_METHOD	cpu_to_be16(0x0008)
#define IB_SMP_UNSUP_METH_ATTR	cpu_to_be16(0x000C)
#define IB_SMP_INVALID_FIELD	cpu_to_be16(0x001C)
/* Turn the request MAD into a GetResp and tell the MAD layer to send it. */
static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
/* SubnGet(NodeDescription): copy the device's node description string.
 * The attribute modifier must be zero for this attribute. */
static int recv_subn_get_nodedescription(struct ib_smp *smp,
					 struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}
/* Wire layout of the NodeInfo attribute (IBA ch. 14); packed so the
 * struct overlays smp->data byte-for-byte. */
struct nodeinfo {
	u8 base_version;
	u8 class_version;
	u8 node_type;
	u8 num_ports;
	__be64 sys_guid;
	__be64 node_guid;
	__be64 port_guid;
	__be16 partition_cap;
	__be16 device_id;
	__be32 revision;
	u8 local_port_num;
	u8 vendor_id[3];
} __attribute__ ((packed));
/*
 * SubnGet(NodeInfo): fill in the NodeInfo attribute from the device
 * data.  The attribute modifier must be zero and the device must have
 * a valid (non-zero) GUID.
 *
 * Cleanup vs. the previous version: the local `vendor` variable was
 * assigned from dd->ipath_vendorid but never read (the OUI bytes below
 * come from the IPATH_SRC_OUI_* constants), so the dead store has been
 * removed.
 */
static int recv_subn_get_nodeinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
	struct ipath_devdata *dd = to_idev(ibdev)->dd;
	u32 majrev, minrev;

	/* GUID 0 is illegal */
	if (smp->attr_mod || (dd->ipath_guid == 0))
		smp->status |= IB_SMP_INVALID_FIELD;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;	/* channel adapter */
	/*
	 * XXX The num_ports value will need a layer function to get
	 * the value if we ever have more than one IB port on a chip.
	 * We will also need to get the GUID for the port.
	 */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = to_idev(ibdev)->sys_image_guid;
	nip->node_guid = dd->ipath_guid;
	nip->port_guid = dd->ipath_guid;
	nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->ipath_deviceid);
	majrev = dd->ipath_majrev;
	minrev = dd->ipath_minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	/* Vendor OUI bytes are fixed QLogic/PathScale constants. */
	nip->vendor_id[0] = IPATH_SRC_OUI_1;
	nip->vendor_id[1] = IPATH_SRC_OUI_2;
	nip->vendor_id[2] = IPATH_SRC_OUI_3;

	return reply(smp);
}
/* SubnGet(GUIDInfo): return the GUID table.  Only one GUID (the HW
 * GUID, in block 0 slot 0) is supported; any other block is invalid. */
static int recv_subn_get_guidinfo(struct ib_smp *smp,
				  struct ib_device *ibdev)
{
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	/*
	 * We only support one GUID for now.  If this changes, the
	 * portinfo.guid_cap field needs to be updated too.
	 */
	if (startgx == 0) {
		__be64 g = to_idev(ibdev)->dd->ipath_guid;
		if (g == 0)
			/* GUID 0 is illegal */
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			/* The first is a copy of the read-only HW GUID. */
			*p = g;
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}
/* Program the enabled link width (1x/4x mask) via the chip-specific hook. */
static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
{
	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
}
/* Program the enabled link speed (SDR/DDR mask) via the chip-specific hook. */
static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
{
	(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
}
static int get_overrunthreshold(struct ipath_devdata *dd)
{
return (dd->ipath_ibcctrl >>
INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
}
/**
 * set_overrunthreshold - set the overrun threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	/* Read-modify-write: only touch the register if the field changes. */
	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
		/* Write back the cached copy so HW and cache stay in sync. */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}
static int get_phyerrthreshold(struct ipath_devdata *dd)
{
return (dd->ipath_ibcctrl >>
INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
}
/**
 * set_phyerrthreshold - set the physical error threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	/* Read-modify-write: only touch the register if the field changes. */
	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
		/* Write back the cached copy so HW and cache stay in sync. */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}
	return 0;
}
/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @dd: the infinipath device
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl &
		INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE) != 0;
}
/*
 * SubnGet(PortInfo): pack the device's link, MTU, key and counter state
 * into the PortInfo attribute (IBA ch. 14.2.5.6).  Many fields share a
 * byte, hence the shift/or packing below.
 */
static int recv_subn_get_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ipath_ibdev *dev;
	struct ipath_devdata *dd;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u16 lid;
	u8 ibcstat;
	u8 mtu;
	int ret;

	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
		smp->status |= IB_SMP_INVALID_FIELD;
		ret = reply(smp);
		goto bail;
	}

	dev = to_idev(ibdev);
	dd = dev->dd;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
	    dev->mkeyprot == 0)
		pip->mkey = dev->mkey;
	pip->gid_prefix = dev->gid_prefix;
	lid = dd->ipath_lid;
	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
	pip->sm_lid = cpu_to_be16(dev->sm_lid);
	pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = dd->ipath_link_width_enabled;
	pip->link_width_supported = dd->ipath_link_width_supported;
	pip->link_width_active = dd->ipath_link_width_active;
	pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
	ibcstat = dd->ipath_lastibcstat;
	/* map LinkState to IB portinfo values. */
	pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;

	pip->portphysstate_linkdown =
		(ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
		(get_linkdowndefaultstate(dd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
	pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
		dd->ipath_link_speed_enabled;
	/* Translate the byte count into the IB MTU enum. */
	switch (dd->ipath_ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:		/* oops, something is wrong */
		mtu = IB_MTU_2048;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
	pip->vlcap_inittype = 0x10;	/* VLCap = VL0, InitType = 0 */
	pip->vl_high_limit = dev->vl_high_limit;
	/* pip->vl_arb_high_cap; // only one VL */
	/* pip->vl_arb_low_cap; // only one VL */
	/* InitTypeReply = 0 */
	/* our mtu cap depends on whether 4K MTU enabled or not */
	pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo = 0x10;	/* OVLs = 1 */
	pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations =
		cpu_to_be16((ipath_get_cr_errpkey(dd) -
			     dev->z_pkey_violations) & 0xFFFF);
	pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = 1;
	pip->clientrereg_resv_subnetto = dev->subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(dd) << 4) |
		get_overrunthreshold(dd);
	/* pip->max_credit_hint; */
	if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}
/**
 * get_pkeys - return the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
{
	/* always a kernel port, no locking needed */
	struct ipath_portdata *pd = dd->ipath_pd[0];

	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));

	return 0;
}
/* SubnGet(PKeyTable): return one 32-entry block of the P_Key table,
 * byte-swapped to network order.  Only block 0 is populated. */
static int recv_subn_get_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct ipath_ibdev *dev = to_idev(ibdev);
		unsigned i, n = ipath_get_npkeys(dev->dd);

		/* Fetch host-order keys, then swap in place. */
		get_pkeys(dev->dd, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}
/* SubnSet(GUIDInfo): the GUID table is read-only, so a Set simply
 * returns the current (unchanged) contents. */
static int recv_subn_set_guidinfo(struct ib_smp *smp,
				  struct ib_device *ibdev)
{
	/* The only GUID we support is the first read-only entry. */
	return recv_subn_get_guidinfo(smp, ibdev);
}
/**
 * set_linkdowndefaultstate - set the default linkdown state
 * @dd: the infinipath device
 * @sleep: the new state
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
{
	if (sleep)
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	else
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	/* Flush the cached control word to the chip. */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl);
	return 0;
}
/**
 * recv_subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 *
 * NOTE: several validation failures below (marked XXX) occur after
 * earlier fields have already been applied, so an invalid request can
 * leave the port partially updated; the reply still reports
 * IB_SMP_INVALID_FIELD and echoes the resulting state.
 */
static int recv_subn_set_portinfo(struct ib_smp *smp,
				  struct ib_device *ibdev, u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct ipath_ibdev *dev;
	struct ipath_devdata *dd;
	char clientrereg = 0;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u16 lstate;
	u32 mtu;
	int ret, ore;

	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
		goto err;

	dev = to_idev(ibdev);
	dd = dev->dd;
	event.device = ibdev;
	event.element.port_num = port;

	dev->mkey = pip->mkey;
	dev->gid_prefix = pip->gid_prefix;
	dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	if (dd->ipath_lid != lid ||
	    dd->ipath_lmc != (pip->mkeyprot_resv_lmc & 7)) {
		/* Must be a valid unicast LID address. */
		if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
			goto err;
		ipath_set_lid(dd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	if (smlid != dev->sm_lid) {
		/* Must be a valid unicast LID address. */
		if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
			goto err;
		dev->sm_lid = smlid;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			/* 0xFF means "set to supported widths". */
			lwe = dd->ipath_link_width_supported;
		else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
			goto err;
		set_link_width_enabled(dd, lwe);
	}

	/* Allow 2.5 or 5.0 Gbs. */
	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		if (lse == 15)
			/* 15 means "set to supported speeds". */
			lse = dd->ipath_link_speed_supported;
		else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
			goto err;
		set_link_speed_enabled(dd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		if (set_linkdowndefaultstate(dd, 1))
			goto err;
		break;
	case 2: /* POLL */
		if (set_linkdowndefaultstate(dd, 0))
			goto err;
		break;
	default:
		goto err;
	}

	dev->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	dev->vl_high_limit = pip->vl_high_limit;

	/* Translate the IB MTU enum back into a byte count. */
	switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
	case IB_MTU_256:
		mtu = 256;
		break;
	case IB_MTU_512:
		mtu = 512;
		break;
	case IB_MTU_1024:
		mtu = 1024;
		break;
	case IB_MTU_2048:
		mtu = 2048;
		break;
	case IB_MTU_4096:
		if (!ipath_mtu4096)
			goto err;
		mtu = 4096;
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}
	ipath_set_mtu(dd, mtu);

	dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;

	/* We only support VL0 */
	if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
		goto err;

	/* Writing zero to a violation counter resets it. */
	if (pip->mkey_violations == 0)
		dev->mkey_violations = 0;

	/*
	 * Hardware counter can't be reset so snapshot and subtract
	 * later.
	 */
	if (pip->pkey_violations == 0)
		dev->z_pkey_violations = ipath_get_cr_errpkey(dd);

	if (pip->qkey_violations == 0)
		dev->qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(dd, (ore >> 4) & 0xF))
		goto err;

	if (set_overrunthreshold(dd, (ore & 0xF)))
		goto err;

	dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	if (pip->clientrereg_resv_subnetto & 0x80) {
		clientrereg = 1;
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		goto err;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = IPATH_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = IPATH_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = IPATH_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = IPATH_IB_LINKDOWN_DISABLE;
		else
			goto err;
		ipath_set_linkstate(dd, lstate);
		if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
				IPATH_LINKACTIVE, 1000);
		break;
	case IB_PORT_ARMED:
		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
		break;
	default:
		/* XXX We have already partially updated our state! */
		goto err;
	}

	ret = recv_subn_get_portinfo(smp, ibdev, port);

	if (clientrereg)
		pip->clientrereg_resv_subnetto |= 0x80;

	goto done;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
	ret = recv_subn_get_portinfo(smp, ibdev, port);

done:
	return ret;
}
/**
 * rm_pkey - decrecment the reference count for the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (dd->ipath_pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
			/* Last reference: clear the slot and ask for a
			 * hardware update. */
			dd->ipath_pkeys[i] = 0;
			return 1;
		}
		/* Still referenced elsewhere; nothing to write back. */
		return 0;
	}

	/* Key not present in the table. */
	return 0;
}
/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	/* The "invalid" limited key (all ones) is never stored. */
	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		/* No free slot and no matching entry: table full. */
		ret = -EBUSY;
		goto bail;
	}
	/* Second pass: claim the first slot we can win the refcount race on. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}
/**
* set_pkeys - set the PKEY table for port 0
* @dd: the infinipath device
* @pkeys: the PKEY table
*/
static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;	/* nonzero once the hardware table differs */

	/* always a kernel port, no locking needed */
	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			if (ret < 0)
				/* Could not add it; record the slot empty. */
				key = 0;
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		u64 pkey;

		/* Pack all four 16-bit keys into the partition key register. */
		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
	return 0;
}
static int recv_subn_set_pkeytable(struct ib_smp *smp,
				   struct ib_device *ibdev)
{
	/* Block index from the attribute modifier; only block 0 exists. */
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *src = (__be16 *) smp->data;
	u16 *dst = (u16 *) smp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	unsigned idx, npkeys = ipath_get_npkeys(dev->dd);

	/* Convert the incoming table to host byte order in place. */
	for (idx = 0; idx < npkeys; idx++)
		dst[idx] = be16_to_cpu(src[idx]);

	if (startpx != 0 || set_pkeys(dev->dd, dst) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	/* The reply carries the (possibly unchanged) current table. */
	return recv_subn_get_pkeytable(smp, ibdev);
}
static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
{
	struct ib_class_port_info *cpi =
		(struct ib_class_port_info *)pmp->data;

	memset(pmp->data, 0, sizeof(pmp->data));

	/* ClassPortInfo takes no attribute modifier. */
	if (pmp->mad_hdr.attr_mod != 0)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Indicate AllPortSelect is valid (only one port anyway) */
	cpi->capability_mask = cpu_to_be16(1 << 8);
	cpi->base_version = 1;
	cpi->class_version = 1;
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824
	 * sec.
	 */
	cpi->resp_time_value = 18;

	return reply((struct ib_smp *) pmp);
}
/*
* The PortSamplesControl.CounterMasks field is an array of 3 bit fields
* which specify the N'th counter's capabilities. See ch. 16.1.3.2.
* We support 5 counters which only count the mandatory quantities.
*/
#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
COUNTER_MASK(1, 1) | \
COUNTER_MASK(1, 2) | \
COUNTER_MASK(1, 3) | \
COUNTER_MASK(1, 4))
/*
 * recv_pma_get_portsamplescontrol - reply to a PortSamplesControl get
 *
 * Fills the reply from the hardware sample-status register when the chip
 * has one (crp->cr_psstat), otherwise from the software sampling state
 * kept in @dev.
 */
static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
					   struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	unsigned long flags;
	u8 port_select = p->port_select;	/* preserved across the memset */

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	/* Only attr_mod 0 and this port (or AllPortSelect 0xFF) are valid. */
	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
	/*
	 * Ticks are 10x the link transfer period which for 2.5Gbs is 4
	 * nsec.  0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec.  Sample
	 * intervals are counted in ticks.  Since we use Linux timers, that
	 * count in jiffies, we can't sample for less than 1000 ticks if HZ
	 * == 1000 (4000 ticks if HZ is 250).  link_speed_active returns 2 for
	 * DDR, 1 for SDR, set the tick to 1 for DDR, 0 for SDR on chips that
	 * have hardware support for delaying packets.
	 */
	if (crp->cr_psstat)
		p->tick = dev->dd->ipath_link_speed_active - 1;
	else
		p->tick = 250;		/* 1 usec. */
	p->counter_width = 4;	/* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	/* Take the lock so the sampling state is read consistently. */
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (crp->cr_psstat)
		p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		p->sample_status = dev->pma_sample_status;
	p->sample_start = cpu_to_be32(dev->pma_sample_start);
	p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
	p->tag = cpu_to_be16(dev->pma_tag);
	p->counter_select[0] = dev->pma_counter_select[0];
	p->counter_select[1] = dev->pma_counter_select[1];
	p->counter_select[2] = dev->pma_counter_select[2];
	p->counter_select[3] = dev->pma_counter_select[3];
	p->counter_select[4] = dev->pma_counter_select[4];
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	return reply((struct ib_smp *) pmp);
}
/*
 * recv_pma_set_portsamplescontrol - start a new sampling interval
 *
 * New sampling parameters are only accepted once the previous sample has
 * completed (IB_PMA_SAMPLE_STATUS_DONE).  The reply mirrors the resulting
 * state via recv_pma_get_portsamplescontrol().
 */
static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
					   struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	unsigned long flags;
	u8 status;
	int ret;

	/* Only attr_mod 0 and this port (or AllPortSelect 0xFF) are valid. */
	if (pmp->mad_hdr.attr_mod != 0 ||
	    (p->port_select != port && p->port_select != 0xFF)) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	if (status == IB_PMA_SAMPLE_STATUS_DONE) {
		/* Latch the requested parameters into the device state. */
		dev->pma_sample_start = be32_to_cpu(p->sample_start);
		dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
		dev->pma_tag = be16_to_cpu(p->tag);
		dev->pma_counter_select[0] = p->counter_select[0];
		dev->pma_counter_select[1] = p->counter_select[1];
		dev->pma_counter_select[2] = p->counter_select[2];
		dev->pma_counter_select[3] = p->counter_select[3];
		dev->pma_counter_select[4] = p->counter_select[4];
		if (crp->cr_psstat) {
			/* Hardware sampling: program interval and start. */
			ipath_write_creg(dev->dd, crp->cr_psinterval,
					 dev->pma_sample_interval);
			ipath_write_creg(dev->dd, crp->cr_psstart,
					 dev->pma_sample_start);
		} else
			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}
/*
 * Return the value of the counter selected by @sel, preferring the
 * hardware sample-counter register when the chip provides one and
 * falling back to the software-maintained value otherwise.
 */
static u64 get_counter(struct ipath_ibdev *dev,
		       struct ipath_cregs const *crp,
		       __be16 sel)
{
	u64 val;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		if (crp->cr_psxmitdatacount)
			val = ipath_read_creg32(dev->dd,
						crp->cr_psxmitdatacount);
		else
			val = dev->ipath_sword;
		break;
	case IB_PMA_PORT_RCV_DATA:
		if (crp->cr_psrcvdatacount)
			val = ipath_read_creg32(dev->dd,
						crp->cr_psrcvdatacount);
		else
			val = dev->ipath_rword;
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		if (crp->cr_psxmitpktscount)
			val = ipath_read_creg32(dev->dd,
						crp->cr_psxmitpktscount);
		else
			val = dev->ipath_spkts;
		break;
	case IB_PMA_PORT_RCV_PKTS:
		if (crp->cr_psrcvpktscount)
			val = ipath_read_creg32(dev->dd,
						crp->cr_psrcvpktscount);
		else
			val = dev->ipath_rpkts;
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		if (crp->cr_psxmitwaitcount)
			val = ipath_read_creg32(dev->dd,
						crp->cr_psxmitwaitcount);
		else
			val = dev->ipath_xmit_wait;
		break;
	default:
		/* Unsupported counter selector. */
		val = 0;
		break;
	}

	return val;
}
static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
					  struct ib_device *ibdev)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	p->tag = cpu_to_be16(dev->pma_tag);
	/* Prefer the hardware sample-status register when present. */
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	p->sample_status = cpu_to_be16(status);
	/* Counter values are only meaningful once sampling is done. */
	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++) {
		if (status == IB_PMA_SAMPLE_STATUS_DONE)
			p->counter[i] = cpu_to_be32(get_counter(dev, crp,
					dev->pma_counter_select[i]));
		else
			p->counter[i] = 0;
	}

	return reply((struct ib_smp *) pmp);
}
static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
					      struct ib_device *ibdev)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	p->tag = cpu_to_be16(dev->pma_tag);
	/* Prefer the hardware sample-status register when present. */
	if (crp->cr_psstat)
		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
	else
		status = dev->pma_sample_status;
	p->sample_status = cpu_to_be16(status);
	/* 64 bits */
	p->extended_width = cpu_to_be32(0x80000000);
	/* Counter values are only meaningful once sampling is done. */
	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++) {
		if (status == IB_PMA_SAMPLE_STATUS_DONE)
			p->counter[i] = cpu_to_be64(get_counter(dev, crp,
					dev->pma_counter_select[i]));
		else
			p->counter[i] = 0;
	}

	return reply((struct ib_smp *) pmp);
}
/*
 * recv_pma_get_portcounters - reply to a PortCounters get
 *
 * Reads the current hardware counters, subtracts the z_* baselines saved
 * by recv_pma_set_portcounters() (the hardware cannot be cleared, so
 * "clearing" is emulated), and saturates each value to the width of its
 * field in the PortCounters attribute.
 */
static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_verbs_counters cntrs;
	u8 port_select = p->port_select;	/* preserved across the memset */

	ipath_get_counters(dev->dd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		dev->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= dev->z_link_downed_counter;
	cntrs.port_rcv_errors += dev->rcv_errors;
	cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
	cntrs.port_xmit_data -= dev->z_port_xmit_data;
	cntrs.port_rcv_data -= dev->z_port_rcv_data;
	cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
	cntrs.port_rcv_packets -= dev->z_port_rcv_packets;
	cntrs.local_link_integrity_errors -=
		dev->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		dev->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= dev->z_vl15_dropped;
	cntrs.vl15_dropped += dev->n_vl15_dropped;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	/* Only attr_mod 0 and this port (or AllPortSelect 0xFF) are valid. */
	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Each counter below saturates at the maximum of its field width. */
	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	/* Both 4-bit counters share one byte: LLI in the high nibble. */
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}
/*
 * recv_pma_get_portcounters_ext - reply to a PortCountersExtended get
 *
 * Like recv_pma_get_portcounters() but with 64-bit fields, so no
 * saturation is needed.  Only the data/packet counters have saved
 * baselines; xwait is snapshotted but unused here.
 */
static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	u64 swords, rwords, spkts, rpkts, xwait;
	u8 port_select = p->port_select;	/* preserved across the memset */

	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
				&rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= dev->z_port_xmit_data;
	rwords -= dev->z_port_rcv_data;
	spkts -= dev->z_port_xmit_packets;
	rpkts -= dev->z_port_rcv_packets;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	/* Only attr_mod 0 and this port (or AllPortSelect 0xFF) are valid. */
	if (pmp->mad_hdr.attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);
	p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv);

	return reply((struct ib_smp *) pmp);
}
/*
 * recv_pma_set_portcounters - "clear" selected PortCounters fields
 *
 * The hardware counters cannot actually be cleared, so for each counter
 * selected in counter_select we save the current value as a z_* baseline
 * that later gets subtracted in recv_pma_get_portcounters().
 */
static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	ipath_get_counters(dev->dd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		dev->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		dev->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		dev->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		dev->z_port_rcv_errors =
			cntrs.port_rcv_errors + dev->rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		dev->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		dev->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		dev->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		dev->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		/* The software-only drop count is reset outright. */
		dev->n_vl15_dropped = 0;
		dev->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		dev->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		dev->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = cntrs.port_rcv_packets;

	/* Reply with the now-"cleared" counter values. */
	return recv_pma_get_portcounters(pmp, ibdev, port);
}
/*
 * recv_pma_set_portcounters_ext - "clear" selected PortCountersExtended
 * fields by saving baselines, mirroring recv_pma_set_portcounters().
 *
 * NOTE(review): the data is cast to the non-extended ib_pma_portcounters
 * struct; presumably counter_select sits at the same offset in both
 * layouts -- confirm against the ib_pma header definitions.
 */
static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	u64 swords, rwords, spkts, rpkts, xwait;

	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
				&rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		dev->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		dev->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = rpkts;

	/* The unicast/multicast counts are software-only; reset outright. */
	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		dev->n_unicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		dev->n_unicast_rcv = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		dev->n_multicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		dev->n_multicast_rcv = 0;

	return recv_pma_get_portcounters_ext(pmp, ibdev, port);
}
/*
 * process_subn - handle an incoming subnet-management MAD
 *
 * Builds the reply in *out_mad (starting from a copy of the request),
 * performs M_Key checking per the PortInfo M_Key protection bits, then
 * dispatches on method/attribute to the recv_subn_* handlers above.
 * Returns an IB_MAD_RESULT_* code for the ib_mad layer.
 */
static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port_num, struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	/* Is the mkey in the process of expiring? */
	if (dev->mkey_lease_timeout &&
	    time_after_eq(jiffies, dev->mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		dev->mkey_lease_timeout = 0;
		dev->mkeyprot = 0;
	}

	/*
	 * M_Key checking depends on
	 * Portinfo:M_Key_protect_bits
	 */
	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
	    dev->mkey != smp->mkey &&
	    (smp->method == IB_MGMT_METHOD_SET ||
	     (smp->method == IB_MGMT_METHOD_GET &&
	      dev->mkeyprot >= 2))) {
		/* The violation counter saturates at 0xFFFF. */
		if (dev->mkey_violations != 0xFFFF)
			++dev->mkey_violations;
		if (dev->mkey_lease_timeout ||
		    dev->mkey_lease_period == 0) {
			ret = IB_MAD_RESULT_SUCCESS |
				IB_MAD_RESULT_CONSUMED;
			goto bail;
		}
		/* Start the lease timer on the first violation. */
		dev->mkey_lease_timeout = jiffies +
			dev->mkey_lease_period * HZ;
		/* Future: Generate a trap notice. */
		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		goto bail;
	} else if (dev->mkey_lease_timeout)
		dev->mkey_lease_timeout = 0;

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = recv_subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_get_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_get_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_get_pkeytable(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			/* Consume silently if the SM is disabled here. */
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			/* Pass through for normal processing if we are SM. */
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_set_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_set_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_set_pkeytable(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			/* Same SM handling as the GET case above. */
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_TRAP_REPRESS:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;
	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}
/*
 * process_perf - handle an incoming performance-management MAD
 *
 * Builds the reply in *out_mad (starting from a copy of the request) and
 * dispatches on method/attribute to the recv_pma_* handlers above.
 * Returns an IB_MAD_RESULT_* code for the ib_mad layer.
 */
static int process_perf(struct ib_device *ibdev, u8 port_num,
			struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->mad_hdr.class_version != 1) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = recv_pma_get_classportinfo(pmp);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = recv_pma_get_portsamplesresult(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = recv_pma_get_portsamplesresult_ext(pmp,
								 ibdev);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_get_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_get_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_set_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_set_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;
	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}
/**
* ipath_process_mad - process an incoming MAD packet
* @ibdev: the infiniband device this packet came in on
* @mad_flags: MAD flags
* @port_num: the port number this packet came in on
* @in_wc: the work completion entry for this packet
* @in_grh: the global route header for this packet
* @in_mad: the incoming MAD
* @out_mad: any outgoing MAD reply
*
* Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
* interested in processing.
*
* Note that the verbs framework has already done the MAD sanity checks,
* and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
* MADs.
*
* This is called by the ib_mad module.
*/
int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		      struct ib_wc *in_wc, struct ib_grh *in_grh,
		      struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	/* Dispatch on the management class; anything else passes through. */
	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		return process_subn(ibdev, mad_flags, port_num,
				    in_mad, out_mad);
	case IB_MGMT_CLASS_PERF_MGMT:
		return process_perf(ibdev, port_num, in_mad, out_mad);
	default:
		return IB_MAD_RESULT_SUCCESS;
	}
}
| gpl-2.0 |
yajnab/android_kernel_samsung_i8260 | drivers/infiniband/hw/mthca/mthca_av.c | 14147 | 10095 | /*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "mthca_dev.h"
/* Tavor static-rate encodings stored in the AV msg_sr field. */
enum {
	MTHCA_RATE_TAVOR_FULL   = 0,
	MTHCA_RATE_TAVOR_1X     = 1,
	MTHCA_RATE_TAVOR_4X     = 2,
	MTHCA_RATE_TAVOR_1X_DDR = 3
};

/* Mem-free (Arbel) static-rate encodings: fractions of the port rate. */
enum {
	MTHCA_RATE_MEMFREE_FULL    = 0,
	MTHCA_RATE_MEMFREE_QUARTER = 1,
	MTHCA_RATE_MEMFREE_EIGHTH  = 2,
	MTHCA_RATE_MEMFREE_HALF    = 3
};

/*
 * Hardware layout of an address vector (UDAV).
 * NOTE(review): field meanings below are inferred from how this file
 * fills them in (mthca_create_ah/mthca_read_ah) -- confirm against the
 * device programming manual.
 */
struct mthca_av {
	__be32 port_pd;			/* port number (bits 31:24) | PD number */
	u8     reserved1;
	u8     g_slid;			/* bit 7: GRH present; low bits: src path bits */
	__be16 dlid;			/* destination LID */
	u8     reserved2;
	u8     gid_index;		/* index into the per-device GID table */
	u8     msg_sr;			/* max message size (high nibble) | static rate */
	u8     hop_limit;		/* GRH hop limit */
	__be32 sl_tclass_flowlabel;	/* SL (31:28) | traffic class | flow label */
	__be32 dgid[4];			/* destination GID */
};
/*
 * Decode a mem-free static-rate value: the encoding selects a power-of-two
 * divisor applied to the port's full rate multiple.
 */
static enum ib_rate memfree_rate_to_ib(u8 mthca_rate, u8 port_rate)
{
	unsigned int shift;

	switch (mthca_rate) {
	case MTHCA_RATE_MEMFREE_EIGHTH:
		shift = 3;
		break;
	case MTHCA_RATE_MEMFREE_QUARTER:
		shift = 2;
		break;
	case MTHCA_RATE_MEMFREE_HALF:
		shift = 1;
		break;
	case MTHCA_RATE_MEMFREE_FULL:
	default:
		shift = 0;
		break;
	}

	return mult_to_ib_rate(port_rate >> shift);
}
/* Decode a Tavor static-rate value; unknown encodings mean "full rate". */
static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
{
	if (mthca_rate == MTHCA_RATE_TAVOR_1X)
		return IB_RATE_2_5_GBPS;
	if (mthca_rate == MTHCA_RATE_TAVOR_1X_DDR)
		return IB_RATE_5_GBPS;
	if (mthca_rate == MTHCA_RATE_TAVOR_4X)
		return IB_RATE_10_GBPS;
	return mult_to_ib_rate(port_rate);
}
/* Convert a device-encoded static rate into the generic ib_rate enum. */
enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port)
{
	u8 port_rate = dev->rate[port - 1];

	if (!mthca_is_memfree(dev))
		return tavor_rate_to_ib(mthca_rate, port_rate);

	/* Handle old Arbel FW */
	if (dev->limits.stat_rate_support == 0x3 && mthca_rate)
		return IB_RATE_2_5_GBPS;

	return memfree_rate_to_ib(mthca_rate, port_rate);
}
static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate)
{
	unsigned int ipd;

	/* Nothing to throttle if the port is already at/below the target. */
	if (cur_rate <= req_rate)
		return 0;

	/*
	 * Inter-packet delay (IPD) to get from rate X down to a rate
	 * no more than Y is (X - 1) / Y.
	 */
	ipd = (cur_rate - 1) / req_rate;
	if (ipd == 0)
		return MTHCA_RATE_MEMFREE_FULL;
	if (ipd == 1)
		return MTHCA_RATE_MEMFREE_HALF;
	if (ipd <= 3)
		return MTHCA_RATE_MEMFREE_QUARTER;
	return MTHCA_RATE_MEMFREE_EIGHTH;
}
/* Map an ib_rate to the nearest Tavor static-rate encoding. */
static u8 ib_rate_to_tavor(u8 static_rate)
{
	switch (static_rate) {
	case IB_RATE_2_5_GBPS:
		return MTHCA_RATE_TAVOR_1X;
	case IB_RATE_5_GBPS:
		return MTHCA_RATE_TAVOR_1X_DDR;
	case IB_RATE_10_GBPS:
		return MTHCA_RATE_TAVOR_4X;
	default:
		return MTHCA_RATE_TAVOR_FULL;
	}
}
u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port)
{
	u8 encoded;

	/* No throttling if no rate given or the port is already slower. */
	if (!static_rate || ib_rate_to_mult(static_rate) >= dev->rate[port - 1])
		return 0;

	if (mthca_is_memfree(dev))
		encoded = ib_rate_to_memfree(ib_rate_to_mult(static_rate),
					     dev->rate[port - 1]);
	else
		encoded = ib_rate_to_tavor(static_rate);

	/* Fall back to an encoding the device claims to support. */
	if (!(dev->limits.stat_rate_support & (1 << encoded)))
		encoded = 1;

	return encoded;
}
/*
 * mthca_create_ah - allocate backing store for an address vector and
 * fill it in from @ah_attr.
 *
 * Backing store is chosen in this order:
 *   - kmalloc for mem-free HCAs (MTHCA_AH_KMALLOC);
 *   - on-HCA DDR memory when no special QPs exist and DDR is visible
 *     (MTHCA_AH_ON_HCA), staged through a kmalloc buffer;
 *   - the PCI DMA pool as the fallback (MTHCA_AH_PCI_POOL).
 * Returns 0 on success or -ENOMEM.
 */
int mthca_create_ah(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct ib_ah_attr *ah_attr,
		    struct mthca_ah *ah)
{
	u32 index = -1;
	struct mthca_av *av = NULL;

	ah->type = MTHCA_AH_PCI_POOL;

	if (mthca_is_memfree(dev)) {
		ah->av = kmalloc(sizeof *ah->av, GFP_ATOMIC);
		if (!ah->av)
			return -ENOMEM;

		ah->type = MTHCA_AH_KMALLOC;
		av = ah->av;
	} else if (!atomic_read(&pd->sqp_count) &&
		   !(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		index = mthca_alloc(&dev->av_table.alloc);

		/* fall back to allocate in host memory */
		if (index == -1)
			goto on_hca_fail;

		/* Staging buffer; copied to the HCA and freed at the end. */
		av = kmalloc(sizeof *av, GFP_ATOMIC);
		if (!av)
			goto on_hca_fail;

		ah->type = MTHCA_AH_ON_HCA;
		ah->avdma = dev->av_table.ddr_av_base +
			index * MTHCA_AV_SIZE;
	}

on_hca_fail:
	if (ah->type == MTHCA_AH_PCI_POOL) {
		ah->av = pci_pool_alloc(dev->av_table.pool,
					GFP_ATOMIC, &ah->avdma);
		if (!ah->av)
			return -ENOMEM;

		av = ah->av;
	}

	ah->key = pd->ntmr.ibmr.lkey;

	memset(av, 0, MTHCA_AV_SIZE);

	av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24));
	av->g_slid = ah_attr->src_path_bits;
	av->dlid = cpu_to_be16(ah_attr->dlid);
	av->msg_sr = (3 << 4) | /* 2K message */
		mthca_get_rate(dev, ah_attr->static_rate, ah_attr->port_num);
	av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Top bit of g_slid flags that a GRH is present. */
		av->g_slid |= 0x80;
		av->gid_index = (ah_attr->port_num - 1) * dev->limits.gid_table_len +
			ah_attr->grh.sgid_index;
		av->hop_limit = ah_attr->grh.hop_limit;
		av->sl_tclass_flowlabel |=
			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
				    ah_attr->grh.flow_label);
		memcpy(av->dgid, ah_attr->grh.dgid.raw, 16);
	} else {
		/* Arbel workaround -- low byte of GID must be 2 */
		av->dgid[3] = cpu_to_be32(2);
	}

	if (0) {
		/* Debug dump of the constructed UDAV; compiled out. */
		int j;

		mthca_dbg(dev, "Created UDAV at %p/%08lx:\n",
			  av, (unsigned long) ah->avdma);
		for (j = 0; j < 8; ++j)
			printk(KERN_DEBUG " [%2x] %08x\n",
			       j * 4, be32_to_cpu(((__be32 *) av)[j]));
	}

	if (ah->type == MTHCA_AH_ON_HCA) {
		/* Push the staged AV to device memory and drop the staging buffer. */
		memcpy_toio(dev->av_table.av_map + index * MTHCA_AV_SIZE,
			    av, MTHCA_AV_SIZE);
		kfree(av);
	}

	return 0;
}
int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
{
	/* Release whatever backing store the AV was given at creation. */
	if (ah->type == MTHCA_AH_ON_HCA)
		mthca_free(&dev->av_table.alloc,
			   (ah->avdma - dev->av_table.ddr_av_base) /
			   MTHCA_AV_SIZE);
	else if (ah->type == MTHCA_AH_PCI_POOL)
		pci_pool_free(dev->av_table.pool, ah->av, ah->avdma);
	else if (ah->type == MTHCA_AH_KMALLOC)
		kfree(ah->av);

	return 0;
}
/* The top bit of g_slid records whether the AH carries a GRH. */
int mthca_ah_grh_present(struct mthca_ah *ah)
{
	return (ah->av->g_slid & 0x80) != 0;
}
/*
 * mthca_read_ah - unpack an AV into UD header fields.
 *
 * Not supported for on-HCA AVs (ah->av points at staged memory only for
 * the kmalloc/pool cases); returns -EINVAL for those, 0 otherwise.
 */
int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
		  struct ib_ud_header *header)
{
	if (ah->type == MTHCA_AH_ON_HCA)
		return -EINVAL;

	header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
	header->lrh.destination_lid = ah->av->dlid;
	/* Mask off the GRH-present flag bit to recover the source LID bits. */
	header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f);
	if (mthca_ah_grh_present(ah)) {
		header->grh.traffic_class =
			(be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff;
		header->grh.flow_label =
			ah->av->sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		header->grh.hop_limit = ah->av->hop_limit;
		/* The source GID comes from the cached GID table entry. */
		ib_get_cached_gid(&dev->ib_dev,
				  be32_to_cpu(ah->av->port_pd) >> 24,
				  ah->av->gid_index % dev->limits.gid_table_len,
				  &header->grh.source_gid);
		memcpy(header->grh.destination_gid.raw,
		       ah->av->dgid, 16);
	}

	return 0;
}
/*
 * mthca_ah_query - report the attributes of an existing address handle.
 *
 * Decodes the packed AV fields back into an ib_ah_attr.  Returns -ENOSYS
 * for on-HCA AVs, 0 otherwise.
 */
int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct mthca_ah *ah   = to_mah(ibah);
	struct mthca_dev *dev = to_mdev(ibah->device);

	/* Only implement for MAD and memfree ah for now. */
	if (ah->type == MTHCA_AH_ON_HCA)
		return -ENOSYS;

	memset(attr, 0, sizeof *attr);
	attr->dlid          = be16_to_cpu(ah->av->dlid);
	attr->sl            = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
	attr->port_num      = be32_to_cpu(ah->av->port_pd) >> 24;
	/* Low 3 bits of msg_sr hold the static-rate encoding. */
	attr->static_rate   = mthca_rate_to_ib(dev, ah->av->msg_sr & 0x7,
					       attr->port_num);
	attr->src_path_bits = ah->av->g_slid & 0x7F;
	attr->ah_flags      = mthca_ah_grh_present(ah) ? IB_AH_GRH : 0;

	if (attr->ah_flags) {
		attr->grh.traffic_class =
			be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20;
		attr->grh.flow_label =
			be32_to_cpu(ah->av->sl_tclass_flowlabel) & 0xfffff;
		attr->grh.hop_limit  = ah->av->hop_limit;
		/* Undo the per-port offset applied when the AV was built. */
		attr->grh.sgid_index = ah->av->gid_index &
				       (dev->limits.gid_table_len - 1);
		memcpy(attr->grh.dgid.raw, ah->av->dgid, 16);
	}

	return 0;
}
/*
 * mthca_init_av_table - set up AV allocation for non-mem-free HCAs.
 *
 * Creates the DDR index allocator and the PCI pool fallback, and maps
 * the on-HCA AV region when DDR is visible.  Mem-free HCAs use plain
 * kmalloc and need none of this.  Resources are unwound in reverse
 * order on failure.  Returns 0 or a negative errno.
 */
int mthca_init_av_table(struct mthca_dev *dev)
{
	int err;

	if (mthca_is_memfree(dev))
		return 0;

	err = mthca_alloc_init(&dev->av_table.alloc,
			       dev->av_table.num_ddr_avs,
			       dev->av_table.num_ddr_avs - 1,
			       0);
	if (err)
		return err;

	dev->av_table.pool = pci_pool_create("mthca_av", dev->pdev,
					     MTHCA_AV_SIZE,
					     MTHCA_AV_SIZE, 0);
	if (!dev->av_table.pool)
		goto out_free_alloc;

	if (!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		/* Map the AV area of device DDR into the kernel. */
		dev->av_table.av_map = ioremap(pci_resource_start(dev->pdev, 4) +
					       dev->av_table.ddr_av_base -
					       dev->ddr_start,
					       dev->av_table.num_ddr_avs *
					       MTHCA_AV_SIZE);
		if (!dev->av_table.av_map)
			goto out_free_pool;
	} else
		dev->av_table.av_map = NULL;

	return 0;

out_free_pool:
	pci_pool_destroy(dev->av_table.pool);

out_free_alloc:
	mthca_alloc_cleanup(&dev->av_table.alloc);
	return -ENOMEM;
}
/*
 * mthca_cleanup_av_table() - undo mthca_init_av_table().
 *
 * Mirrors the init path: nothing to do in mem-free mode; otherwise
 * unmap the AV region (if one was mapped), destroy the DMA pool and
 * tear down the index allocator.
 */
void mthca_cleanup_av_table(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev))
                return;

        if (dev->av_table.av_map)
                iounmap(dev->av_table.av_map);
        pci_pool_destroy(dev->av_table.pool);
        mthca_alloc_cleanup(&dev->av_table.alloc);
}
| gpl-2.0 |
Falklore/shamu | net/llc/llc_s_ev.c | 15683 | 3613 | /*
* llc_s_ev.c - Defines SAP component events
*
* The followed event functions are SAP component events which are described
* in 802.2 LLC protocol standard document.
*
* Copyright (c) 1997 by Procom Technology, Inc.
* 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program can be redistributed or modified under the terms of the
* GNU General Public License as published by the Free Software Foundation.
* This program is distributed without any warranty or implied warranty
* of merchantability or fitness for a particular purpose.
*
* See the GNU General Public License for more details.
*/
#include <linux/socket.h>
#include <net/sock.h>
#include <net/llc_if.h>
#include <net/llc_s_ev.h>
#include <net/llc_pdu.h>
/*
 * Event qualifier: returns 0 (match) when @skb carries a simple-type
 * event whose primitive is LLC_SAP_EV_ACTIVATION_REQ, 1 otherwise.
 */
int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb)
{
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);

        if (ev->type == LLC_SAP_EV_TYPE_SIMPLE &&
            ev->prim_type == LLC_SAP_EV_ACTIVATION_REQ)
                return 0;
        return 1;
}
/*
 * Event qualifier: returns 0 (match) for a received UI command PDU of
 * U type, 1 otherwise.
 */
int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb)
{
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);
        struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);

        if (ev->type != LLC_SAP_EV_TYPE_PDU)
                return 1;
        if (!LLC_PDU_IS_CMD(pdu) || !LLC_PDU_TYPE_IS_U(pdu))
                return 1;
        return LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_UI ? 0 : 1;
}
/*
 * Event qualifier: returns 0 (match) for a local DATAUNIT request
 * primitive, 1 otherwise.
 */
int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb)
{
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);

        if (ev->type != LLC_SAP_EV_TYPE_PRIM)
                return 1;
        if (ev->prim != LLC_DATAUNIT_PRIM)
                return 1;
        return ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
}
/*
 * Event qualifier: returns 0 (match) for a local XID request primitive,
 * 1 otherwise.
 */
int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb)
{
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);

        if (ev->type != LLC_SAP_EV_TYPE_PRIM)
                return 1;
        if (ev->prim != LLC_XID_PRIM)
                return 1;
        return ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
}
/*
 * Event qualifier: returns 0 (match) for a received XID command PDU of
 * U type, 1 otherwise.
 */
int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb)
{
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);
        struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);

        if (ev->type != LLC_SAP_EV_TYPE_PDU)
                return 1;
        if (!LLC_PDU_IS_CMD(pdu) || !LLC_PDU_TYPE_IS_U(pdu))
                return 1;
        return LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID ? 0 : 1;
}
/*
 * Event qualifier: returns 0 (match) for a received XID response PDU of
 * U type, 1 otherwise.
 */
int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb)
{
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);
        struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);

        if (ev->type != LLC_SAP_EV_TYPE_PDU)
                return 1;
        if (!LLC_PDU_IS_RSP(pdu) || !LLC_PDU_TYPE_IS_U(pdu))
                return 1;
        return LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_XID ? 0 : 1;
}
/*
 * Event qualifier: returns 0 (match) for a local TEST request primitive,
 * 1 otherwise.
 */
int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb)
{
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);

        if (ev->type != LLC_SAP_EV_TYPE_PRIM)
                return 1;
        if (ev->prim != LLC_TEST_PRIM)
                return 1;
        return ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
}
/*
 * Event qualifier: returns 0 (match) for a received TEST command PDU of
 * U type, 1 otherwise.
 */
int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb)
{
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);
        struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);

        if (ev->type != LLC_SAP_EV_TYPE_PDU)
                return 1;
        if (!LLC_PDU_IS_CMD(pdu) || !LLC_PDU_TYPE_IS_U(pdu))
                return 1;
        return LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST ? 0 : 1;
}
/*
 * Event qualifier: returns 0 (match) for a received TEST response PDU of
 * U type, 1 otherwise.
 */
int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb)
{
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);
        struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);

        if (ev->type != LLC_SAP_EV_TYPE_PDU)
                return 1;
        if (!LLC_PDU_IS_RSP(pdu) || !LLC_PDU_TYPE_IS_U(pdu))
                return 1;
        return LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_TEST ? 0 : 1;
}
/*
 * Event qualifier: returns 0 (match) when @skb carries a simple-type
 * event whose primitive is LLC_SAP_EV_DEACTIVATION_REQ, 1 otherwise.
 */
int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb)
{
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);

        if (ev->type == LLC_SAP_EV_TYPE_SIMPLE &&
            ev->prim_type == LLC_SAP_EV_DEACTIVATION_REQ)
                return 0;
        return 1;
}
| gpl-2.0 |
heyoufei2/yocto3.14.38_kernel | sound/soc/pxa/e740_wm9705.c | 324 | 4899 | /*
* e740-wm9705.c -- SoC audio for e740
*
* Copyright 2007 (c) Ian Molton <spyro@f2s.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 ONLY.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/gpio.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <mach/audio.h>
#include <mach/eseries-gpio.h>
#include <asm/mach-types.h>
#include "../codecs/wm9705.h"
#include "pxa2xx-ac97.h"
#define E740_AUDIO_OUT 1
#define E740_AUDIO_IN 2
static int e740_audio_power;
/*
 * Push the combined audio power state out to the three board GPIOs.
 * @status is a bitmask of E740_AUDIO_OUT / E740_AUDIO_IN; nAVDD2 is
 * driven low whenever any path is powered (the 'n' prefix suggests
 * the line is active-low).
 */
static void e740_sync_audio_power(int status)
{
        int out_on = (status & E740_AUDIO_OUT) ? 1 : 0;
        int in_on  = (status & E740_AUDIO_IN) ? 1 : 0;

        gpio_set_value(GPIO_E740_WM9705_nAVDD2, !status);
        gpio_set_value(GPIO_E740_AMP_ON, out_on);
        gpio_set_value(GPIO_E740_MIC_ON, in_on);
}
/*
 * DAPM event callback for the "Mic Amp" widget: record the input-path
 * power state in e740_audio_power and push the combined state to the
 * board GPIOs.
 */
static int e740_mic_amp_event(struct snd_soc_dapm_widget *w,
                              struct snd_kcontrol *kcontrol, int event)
{
        if (event & SND_SOC_DAPM_PRE_PMU)
                e740_audio_power |= E740_AUDIO_IN;
        else if (event & SND_SOC_DAPM_POST_PMD)
                e740_audio_power &= ~E740_AUDIO_IN;

        e740_sync_audio_power(e740_audio_power);

        return 0;
}
/*
 * DAPM event callback for the "Output Amp" widget: record the output-path
 * power state in e740_audio_power and push the combined state to the
 * board GPIOs.
 */
static int e740_output_amp_event(struct snd_soc_dapm_widget *w,
                                 struct snd_kcontrol *kcontrol, int event)
{
        if (event & SND_SOC_DAPM_PRE_PMU)
                e740_audio_power |= E740_AUDIO_OUT;
        else if (event & SND_SOC_DAPM_POST_PMD)
                e740_audio_power &= ~E740_AUDIO_OUT;

        e740_sync_audio_power(e740_audio_power);

        return 0;
}
static const struct snd_soc_dapm_widget e740_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_SPK("Speaker", NULL),
SND_SOC_DAPM_MIC("Mic (Internal)", NULL),
SND_SOC_DAPM_PGA_E("Output Amp", SND_SOC_NOPM, 0, 0, NULL, 0,
e740_output_amp_event, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_PGA_E("Mic Amp", SND_SOC_NOPM, 0, 0, NULL, 0,
e740_mic_amp_event, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMD),
};
static const struct snd_soc_dapm_route audio_map[] = {
{"Output Amp", NULL, "LOUT"},
{"Output Amp", NULL, "ROUT"},
{"Output Amp", NULL, "MONOOUT"},
{"Speaker", NULL, "Output Amp"},
{"Headphone Jack", NULL, "Output Amp"},
{"MIC1", NULL, "Mic Amp"},
{"Mic Amp", NULL, "Mic (Internal)"},
};
/*
 * Machine-level codec init: mark WM9705 pins as no-connect (presumably
 * unwired on this board — confirm against the e740 schematic), then
 * register the board-specific widgets and the routes between them.
 */
static int e740_ac97_init(struct snd_soc_pcm_runtime *rtd)
{
        struct snd_soc_codec *codec = rtd->codec;
        struct snd_soc_dapm_context *dapm = &codec->dapm;

        /* Codec pins not used by this machine driver. */
        snd_soc_dapm_nc_pin(dapm, "HPOUTL");
        snd_soc_dapm_nc_pin(dapm, "HPOUTR");
        snd_soc_dapm_nc_pin(dapm, "PHONE");
        snd_soc_dapm_nc_pin(dapm, "LINEINL");
        snd_soc_dapm_nc_pin(dapm, "LINEINR");
        snd_soc_dapm_nc_pin(dapm, "CDINL");
        snd_soc_dapm_nc_pin(dapm, "CDINR");
        snd_soc_dapm_nc_pin(dapm, "PCBEEP");
        snd_soc_dapm_nc_pin(dapm, "MIC2");

        /* Board widgets (amps, jack, speaker, mic) and their routing. */
        snd_soc_dapm_new_controls(dapm, e740_dapm_widgets,
                                  ARRAY_SIZE(e740_dapm_widgets));

        snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));

        return 0;
}
static struct snd_soc_dai_link e740_dai[] = {
{
.name = "AC97",
.stream_name = "AC97 HiFi",
.cpu_dai_name = "pxa2xx-ac97",
.codec_dai_name = "wm9705-hifi",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9705-codec",
.init = e740_ac97_init,
},
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
.cpu_dai_name = "pxa2xx-ac97-aux",
.codec_dai_name = "wm9705-aux",
.platform_name = "pxa-pcm-audio",
.codec_name = "wm9705-codec",
},
};
static struct snd_soc_card e740 = {
.name = "Toshiba e740",
.owner = THIS_MODULE,
.dai_link = e740_dai,
.num_links = ARRAY_SIZE(e740_dai),
};
static struct gpio e740_audio_gpios[] = {
{ GPIO_E740_MIC_ON, GPIOF_OUT_INIT_LOW, "Mic amp" },
{ GPIO_E740_AMP_ON, GPIOF_OUT_INIT_LOW, "Output amp" },
{ GPIO_E740_WM9705_nAVDD2, GPIOF_OUT_INIT_HIGH, "Audio power" },
};
/*
 * Platform probe: claim the three audio power GPIOs, then register the
 * sound card.  The GPIOs are released again if card registration fails.
 */
static int e740_probe(struct platform_device *pdev)
{
        struct snd_soc_card *card = &e740;
        int ret;

        ret = gpio_request_array(e740_audio_gpios,
                                 ARRAY_SIZE(e740_audio_gpios));
        if (ret)
                return ret;

        card->dev = &pdev->dev;

        ret = snd_soc_register_card(card);
        if (ret) {
                dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
                        ret);
                /* Undo the GPIO request on failure. */
                gpio_free_array(e740_audio_gpios, ARRAY_SIZE(e740_audio_gpios));
        }
        return ret;
}
/* Platform remove: release the audio GPIOs and unregister the card. */
static int e740_remove(struct platform_device *pdev)
{
        struct snd_soc_card *card = platform_get_drvdata(pdev);

        /*
         * NOTE(review): GPIOs are freed before the card is unregistered,
         * so DAPM events could in principle still fire while the lines
         * are already released — confirm whether this order is intended.
         */
        gpio_free_array(e740_audio_gpios, ARRAY_SIZE(e740_audio_gpios));
        snd_soc_unregister_card(card);
        return 0;
}
static struct platform_driver e740_driver = {
.driver = {
.name = "e740-audio",
.owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = e740_probe,
.remove = e740_remove,
};
module_platform_driver(e740_driver);
/* Module information */
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
MODULE_DESCRIPTION("ALSA SoC driver for e740");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:e740-audio");
| gpl-2.0 |
tb-303/GFRG110 | drivers/base/devres.c | 580 | 16306 | /*
* drivers/base/devres.c - device resource management
*
* Copyright (c) 2006 SUSE Linux Products GmbH
* Copyright (c) 2006 Tejun Heo <teheo@suse.de>
*
* This file is released under the GPLv2.
*/
#include <linux/device.h>
#include <linux/module.h>
#include "base.h"
struct devres_node {
struct list_head entry;
dr_release_t release;
#ifdef CONFIG_DEBUG_DEVRES
const char *name;
size_t size;
#endif
};
struct devres {
struct devres_node node;
/* -- 3 pointers */
unsigned long long data[]; /* guarantee ull alignment */
};
struct devres_group {
struct devres_node node[2];
void *id;
int color;
/* -- 8 pointers */
};
#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);
static void set_node_dbginfo(struct devres_node *node, const char *name,
size_t size)
{
node->name = name;
node->size = size;
}
static void devres_log(struct device *dev, struct devres_node *node,
const char *op)
{
if (unlikely(log_devres))
dev_printk(KERN_ERR, dev, "DEVRES %3s %p %s (%lu bytes)\n",
op, node, node->name, (unsigned long)node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s) do {} while (0)
#define devres_log(dev, node, op) do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */
/*
* Release functions for devres group. These callbacks are used only
* for identification.
*/
static void group_open_release(struct device *dev, void *res)
{
/* noop */
}
static void group_close_release(struct device *dev, void *res)
{
/* noop */
}
static struct devres_group * node_to_group(struct devres_node *node)
{
if (node->release == &group_open_release)
return container_of(node, struct devres_group, node[0]);
if (node->release == &group_close_release)
return container_of(node, struct devres_group, node[1]);
return NULL;
}
static __always_inline struct devres * alloc_dr(dr_release_t release,
size_t size, gfp_t gfp)
{
size_t tot_size = sizeof(struct devres) + size;
struct devres *dr;
dr = kmalloc_track_caller(tot_size, gfp);
if (unlikely(!dr))
return NULL;
memset(dr, 0, tot_size);
INIT_LIST_HEAD(&dr->node.entry);
dr->node.release = release;
return dr;
}
static void add_dr(struct device *dev, struct devres_node *node)
{
devres_log(dev, node, "ADD");
BUG_ON(!list_empty(&node->entry));
list_add_tail(&node->entry, &dev->devres_head);
}
#ifdef CONFIG_DEBUG_DEVRES
void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
const char *name)
{
struct devres *dr;
dr = alloc_dr(release, size, gfp);
if (unlikely(!dr))
return NULL;
set_node_dbginfo(&dr->node, name, size);
return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc);
#else
/**
* devres_alloc - Allocate device resource data
* @release: Release function devres will be associated with
* @size: Allocation size
* @gfp: Allocation flags
*
* Allocate devres of @size bytes. The allocated area is zeroed, then
* associated with @release. The returned pointer can be passed to
* other devres_*() functions.
*
* RETURNS:
* Pointer to allocated devres on success, NULL on failure.
*/
void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
struct devres *dr;
dr = alloc_dr(release, size, gfp);
if (unlikely(!dr))
return NULL;
return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc);
#endif
/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free a devres created with devres_alloc().  A NULL @res is a no-op.
 */
void devres_free(void *res)
{
        struct devres *dr;

        if (!res)
                return;

        dr = container_of(res, struct devres, data);
        /* Freeing an entry still linked to a device is a caller bug. */
        BUG_ON(!list_empty(&dr->node.entry));
        kfree(dr);
}
EXPORT_SYMBOL_GPL(devres_free);
/**
* devres_add - Register device resource
* @dev: Device to add resource to
* @res: Resource to register
*
* Register devres @res to @dev. @res should have been allocated
* using devres_alloc(). On driver detach, the associated release
* function will be invoked and devres will be freed automatically.
*/
void devres_add(struct device *dev, void *res)
{
struct devres *dr = container_of(res, struct devres, data);
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
add_dr(dev, &dr->node);
spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
/*
 * Walk the devres list newest-first and return the first entry bound to
 * @release that @match accepts (a NULL @match accepts everything).
 * Caller must hold dev->devres_lock.
 */
static struct devres *find_dr(struct device *dev, dr_release_t release,
                              dr_match_t match, void *match_data)
{
        struct devres_node *node;

        list_for_each_entry_reverse(node, &dev->devres_head, entry) {
                struct devres *dr = container_of(node, struct devres, node);

                if (node->release == release &&
                    (!match || match(dev, dr->data, match_data)))
                        return dr;
        }

        return NULL;
}
/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Look up the most recently added devres of @dev that is associated
 * with @release and accepted by @match.  A NULL @match accepts all.
 *
 * RETURNS:
 * Pointer to the found devres data, NULL when nothing matches.
 */
void * devres_find(struct device *dev, dr_release_t release,
                   dr_match_t match, void *match_data)
{
        struct devres *dr;
        unsigned long flags;
        void *data = NULL;

        spin_lock_irqsave(&dev->devres_lock, flags);
        dr = find_dr(dev, release, match, match_data);
        if (dr)
                data = dr->data;
        spin_unlock_irqrestore(&dev->devres_lock, flags);

        return data;
}
EXPORT_SYMBOL_GPL(devres_find);
/**
* devres_get - Find devres, if non-existent, add one atomically
* @dev: Device to lookup or add devres for
* @new_res: Pointer to new initialized devres to add if not found
* @match: Match function (optional)
* @match_data: Data for the match function
*
* Find the latest devres of @dev which has the same release function
* as @new_res and for which @match return 1. If found, @new_res is
* freed; otherwise, @new_res is added atomically.
*
* RETURNS:
* Pointer to found or added devres.
*/
void * devres_get(struct device *dev, void *new_res,
dr_match_t match, void *match_data)
{
struct devres *new_dr = container_of(new_res, struct devres, data);
struct devres *dr;
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
dr = find_dr(dev, new_dr->node.release, match, match_data);
if (!dr) {
add_dr(dev, &new_dr->node);
dr = new_dr;
new_dr = NULL;
}
spin_unlock_irqrestore(&dev->devres_lock, flags);
devres_free(new_dr);
return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Look up the most recently added devres of @dev associated with
 * @release and accepted by @match (NULL @match accepts all), and
 * atomically unlink it from the device's resource list.
 *
 * RETURNS:
 * Pointer to the removed devres data on success, NULL if not found.
 */
void * devres_remove(struct device *dev, dr_release_t release,
                     dr_match_t match, void *match_data)
{
        struct devres *dr;
        unsigned long flags;
        void *data = NULL;

        spin_lock_irqsave(&dev->devres_lock, flags);
        dr = find_dr(dev, release, match, match_data);
        if (dr) {
                list_del_init(&dr->node.entry);
                devres_log(dev, &dr->node, "REM");
                data = dr->data;
        }
        spin_unlock_irqrestore(&dev->devres_lock, flags);

        return data;
}
EXPORT_SYMBOL_GPL(devres_remove);
/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Remove the most recent matching devres (see devres_remove()) and free
 * it.  The release function is NOT invoked.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
                   dr_match_t match, void *match_data)
{
        void *res = devres_remove(dev, release, match, match_data);

        if (!res)
                return -ENOENT;

        devres_free(res);
        return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);
/*
 * remove_nodes - unlink devres entries in [@first, @end) onto @todo.
 *
 * Called with dev->devres_lock held.  Plain devres entries are moved to
 * @todo unconditionally; a devres_group's marker pair is moved only when
 * the group is wholly contained in the range, decided by the two-color
 * scheme below.
 *
 * Returns the number of non-group entries moved to @todo.
 */
static int remove_nodes(struct device *dev,
                        struct list_head *first, struct list_head *end,
                        struct list_head *todo)
{
        int cnt = 0, nr_groups = 0;
        struct list_head *cur;

        /* First pass - move normal devres entries to @todo and clear
         * devres_group colors.
         */
        cur = first;
        while (cur != end) {
                struct devres_node *node;
                struct devres_group *grp;

                node = list_entry(cur, struct devres_node, entry);
                cur = cur->next;

                grp = node_to_group(node);
                if (grp) {
                        /* clear color of group markers in the first pass */
                        grp->color = 0;
                        nr_groups++;
                } else {
                        /* regular devres entry */
                        if (&node->entry == first)
                                first = first->next; /* keep @first valid after the move */
                        list_move_tail(&node->entry, todo);
                        cnt++;
                }
        }

        if (!nr_groups)
                return cnt;

        /* Second pass - Scan groups and color them. A group gets
         * color value of two iff the group is wholly contained in
         * [cur, end). That is, for a closed group, both opening and
         * closing markers should be in the range, while just the
         * opening marker is enough for an open group.
         */
        cur = first;
        while (cur != end) {
                struct devres_node *node;
                struct devres_group *grp;

                node = list_entry(cur, struct devres_node, entry);
                cur = cur->next;

                grp = node_to_group(node);
                /* After pass one, only group markers remain in the range. */
                BUG_ON(!grp || list_empty(&grp->node[0].entry));

                grp->color++;
                if (list_empty(&grp->node[1].entry))
                        grp->color++;   /* open group: opening marker alone suffices */

                BUG_ON(grp->color <= 0 || grp->color > 2);
                if (grp->color == 2) {
                        /* No need to update cur or end. The removed
                         * nodes are always before both.
                         */
                        list_move_tail(&grp->node[0].entry, todo);
                        list_del_init(&grp->node[1].entry);
                }
        }

        return cnt;
}
/*
 * release_nodes - remove and release devres entries in [@first, @end).
 *
 * Must be entered with dev->devres_lock held and @flags from the
 * caller's spin_lock_irqsave(); the lock is DROPPED here before the
 * release callbacks run.
 *
 * Returns the number of released non-group entries.
 */
static int release_nodes(struct device *dev, struct list_head *first,
                         struct list_head *end, unsigned long flags)
{
        LIST_HEAD(todo);
        int cnt;
        struct devres *dr, *tmp;

        cnt = remove_nodes(dev, first, end, &todo);

        /* Unlock on behalf of the caller; callbacks run unlocked. */
        spin_unlock_irqrestore(&dev->devres_lock, flags);

        /* Release. Note that both devres and devres_group are
         * handled as devres in the following loop. This is safe.
         */
        list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
                devres_log(dev, &dr->node, "REL");
                dr->node.release(dev, dr->data);
                kfree(dr);
        }

        return cnt;
}
/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev. This function is
 * called on driver detach.
 *
 * RETURNS:
 * Number of released non-group resources, or -ENODEV if @dev looks
 * uninitialized.
 */
int devres_release_all(struct device *dev)
{
        unsigned long flags;

        /* Looks like an uninitialized device structure */
        if (WARN_ON(dev->devres_head.next == NULL))
                return -ENODEV;

        spin_lock_irqsave(&dev->devres_lock, flags);
        /* release_nodes() drops devres_lock before running callbacks. */
        return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
                             flags);
}
/**
* devres_open_group - Open a new devres group
* @dev: Device to open devres group for
* @id: Separator ID
* @gfp: Allocation flags
*
* Open a new devres group for @dev with @id. For @id, using a
* pointer to an object which won't be used for another group is
* recommended. If @id is NULL, address-wise unique ID is created.
*
* RETURNS:
* ID of the new group, NULL on failure.
*/
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
struct devres_group *grp;
unsigned long flags;
grp = kmalloc(sizeof(*grp), gfp);
if (unlikely(!grp))
return NULL;
grp->node[0].release = &group_open_release;
grp->node[1].release = &group_close_release;
INIT_LIST_HEAD(&grp->node[0].entry);
INIT_LIST_HEAD(&grp->node[1].entry);
set_node_dbginfo(&grp->node[0], "grp<", 0);
set_node_dbginfo(&grp->node[1], "grp>", 0);
grp->id = grp;
if (id)
grp->id = id;
spin_lock_irqsave(&dev->devres_lock, flags);
add_dr(dev, &grp->node[0]);
spin_unlock_irqrestore(&dev->devres_lock, flags);
return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);
/* Find devres group with ID @id. If @id is NULL, look for the latest. */
static struct devres_group * find_group(struct device *dev, void *id)
{
struct devres_node *node;
list_for_each_entry_reverse(node, &dev->devres_head, entry) {
struct devres_group *grp;
if (node->release != &group_open_release)
continue;
grp = container_of(node, struct devres_group, node[0]);
if (id) {
if (grp->id == id)
return grp;
} else if (list_empty(&grp->node[1].entry))
return grp;
}
return NULL;
}
/**
* devres_close_group - Close a devres group
* @dev: Device to close devres group for
* @id: ID of target group, can be NULL
*
* Close the group identified by @id. If @id is NULL, the latest open
* group is selected.
*/
void devres_close_group(struct device *dev, void *id)
{
struct devres_group *grp;
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
grp = find_group(dev, id);
if (grp)
add_dr(dev, &grp->node[1]);
else
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);
/**
* devres_remove_group - Remove a devres group
* @dev: Device to remove group for
* @id: ID of target group, can be NULL
*
* Remove the group identified by @id. If @id is NULL, the latest
* open group is selected. Note that removing a group doesn't affect
* any other resources.
*/
void devres_remove_group(struct device *dev, void *id)
{
struct devres_group *grp;
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
grp = find_group(dev, id);
if (grp) {
list_del_init(&grp->node[0].entry);
list_del_init(&grp->node[1].entry);
devres_log(dev, &grp->node[0], "REM");
} else
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);
/**
* devres_release_group - Release resources in a devres group
* @dev: Device to release group for
* @id: ID of target group, can be NULL
*
* Release all resources in the group identified by @id. If @id is
* NULL, the latest open group is selected. The selected group and
* groups properly nested inside the selected group are removed.
*
* RETURNS:
* The number of released non-group resources.
*/
int devres_release_group(struct device *dev, void *id)
{
struct devres_group *grp;
unsigned long flags;
int cnt = 0;
spin_lock_irqsave(&dev->devres_lock, flags);
grp = find_group(dev, id);
if (grp) {
struct list_head *first = &grp->node[0].entry;
struct list_head *end = &dev->devres_head;
if (!list_empty(&grp->node[1].entry))
end = grp->node[1].entry.next;
cnt = release_nodes(dev, first, end, flags);
} else {
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
}
return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
/*
* Managed kzalloc/kfree
*/
static void devm_kzalloc_release(struct device *dev, void *res)
{
/* noop */
}
static int devm_kzalloc_match(struct device *dev, void *res, void *data)
{
return res == data;
}
/**
* devm_kzalloc - Resource-managed kzalloc
* @dev: Device to allocate memory for
* @size: Allocation size
* @gfp: Allocation gfp flags
*
* Managed kzalloc. Memory allocated with this function is
* automatically freed on driver detach. Like all other devres
* resources, guaranteed alignment is unsigned long long.
*
* RETURNS:
* Pointer to allocated memory on success, NULL on failure.
*/
void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
struct devres *dr;
/* use raw alloc_dr for kmalloc caller tracing */
dr = alloc_dr(devm_kzalloc_release, size, gfp);
if (unlikely(!dr))
return NULL;
set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
devres_add(dev, dr->data);
return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kzalloc);
/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kzalloc().
 */
void devm_kfree(struct device *dev, void *p)
{
        int rc;

        /* Unlink and free the matching devres entry; WARN if @p was not
         * a devm allocation belonging to @dev. */
        rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p);
        WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);
| gpl-2.0 |
embest-tech/rowboat-kernel | drivers/serial/kgdboc.c | 836 | 3697 | /*
* Based on the same principle as kgdboe using the NETPOLL api, this
* driver uses a console polling api to implement a gdb serial inteface
* which is multiplexed on a console port.
*
* Maintainer: Jason Wessel <jason.wessel@windriver.com>
*
* 2007-2008 (c) Jason Wessel - Wind River Systems, Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/kgdb.h>
#include <linux/tty.h>
#define MAX_CONFIG_LEN 40
static struct kgdb_io kgdboc_io_ops;
/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
static int configured = -1;
static char config[MAX_CONFIG_LEN];
static struct kparam_string kps = {
.string = config,
.maxlen = MAX_CONFIG_LEN,
};
static struct tty_driver *kgdb_tty_driver;
static int kgdb_tty_line;
/*
 * Parse the "kgdboc=" boot option into the config buffer.
 * Returns 0 on success, -ENOSPC when the option does not fit.
 */
static int kgdboc_option_setup(char *opt)
{
        /*
         * config[] holds MAX_CONFIG_LEN bytes INCLUDING the NUL
         * terminator.  A string of exactly MAX_CONFIG_LEN characters
         * would overflow it by one byte on the strcpy() below, so
         * reject those too (the old '>' check was off by one).
         */
        if (strlen(opt) >= MAX_CONFIG_LEN) {
                printk(KERN_ERR "kgdboc: config string too long\n");
                return -ENOSPC;
        }
        strcpy(config, opt);

        return 0;
}
__setup("kgdboc=", kgdboc_option_setup);
/*
 * Resolve the "<serial_device>[,baud]" config string to a polling-capable
 * tty driver and register the kgdb I/O module on it.  On any failure the
 * config string is cleared and the module is left unconfigured.
 */
static int configure_kgdboc(void)
{
        struct tty_driver *p;
        int tty_line = 0;
        int err;

        err = kgdboc_option_setup(config);
        if (err || !strlen(config) || isspace(config[0]))
                goto noconfig;

        err = -ENODEV;

        p = tty_find_polling_driver(config, &tty_line);
        if (!p)
                goto noconfig;

        kgdb_tty_driver = p;
        kgdb_tty_line = tty_line;

        err = kgdb_register_io_module(&kgdboc_io_ops);
        if (err)
                goto noconfig;

        configured = 1;

        return 0;

noconfig:
        /* Wipe the bad config so later reads see an unconfigured state. */
        config[0] = 0;
        configured = 0;

        return err;
}
/*
 * Module init: the module parameter handler may already have configured
 * kgdboc before init ran, in which case there is nothing left to do.
 */
static int __init init_kgdboc(void)
{
        /* Already configured? */
        if (configured == 1)
                return 0;

        return configure_kgdboc();
}
static void cleanup_kgdboc(void)
{
if (configured == 1)
kgdb_unregister_io_module(&kgdboc_io_ops);
}
/*
 * kgdb read-char hook: poll one character from the configured tty.
 * Returns -1 when no driver is configured (e.g. called before
 * configure_kgdboc() succeeded or after a failed reconfiguration),
 * instead of dereferencing a NULL kgdb_tty_driver.
 */
static int kgdboc_get_char(void)
{
        if (!kgdb_tty_driver)
                return -1;
        return kgdb_tty_driver->ops->poll_get_char(kgdb_tty_driver,
                                                   kgdb_tty_line);
}
/*
 * kgdb write-char hook: emit one character on the configured tty.
 * Silently drops the character when no driver is configured, instead
 * of dereferencing a NULL kgdb_tty_driver.
 */
static void kgdboc_put_char(u8 chr)
{
        if (!kgdb_tty_driver)
                return;
        kgdb_tty_driver->ops->poll_put_char(kgdb_tty_driver,
                                            kgdb_tty_line, chr);
}
/*
 * Sysfs/module-param writer for "kgdboc": validate the new config string,
 * refuse reconfiguration while a debugger is attached, then tear down and
 * rebuild the kgdb I/O binding with the new parameters.
 */
static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
{
        int len = strlen(kmessage);

        if (len >= MAX_CONFIG_LEN) {
                printk(KERN_ERR "kgdboc: config string too long\n");
                return -ENOSPC;
        }

        /* Only copy in the string if the init function has not run yet */
        if (configured < 0) {
                strcpy(config, kmessage);
                return 0;
        }

        if (kgdb_connected) {
                printk(KERN_ERR
                       "kgdboc: Cannot reconfigure while KGDB is connected.\n");
                return -EBUSY;
        }

        strcpy(config, kmessage);
        /*
         * Chop out \n char as a result of echo.  Guard on len: an empty
         * kmessage would otherwise read config[-1], out of bounds.
         */
        if (len && config[len - 1] == '\n')
                config[len - 1] = '\0';

        if (configured == 1)
                cleanup_kgdboc();

        /* Go and configure with the new params. */
        return configure_kgdboc();
}
static void kgdboc_pre_exp_handler(void)
{
/* Increment the module count when the debugger is active */
if (!kgdb_connected)
try_module_get(THIS_MODULE);
}
static void kgdboc_post_exp_handler(void)
{
/* decrement the module count when the debugger detaches */
if (!kgdb_connected)
module_put(THIS_MODULE);
}
static struct kgdb_io kgdboc_io_ops = {
.name = "kgdboc",
.read_char = kgdboc_get_char,
.write_char = kgdboc_put_char,
.pre_exception = kgdboc_pre_exp_handler,
.post_exception = kgdboc_post_exp_handler,
};
module_init(init_kgdboc);
module_exit(cleanup_kgdboc);
module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644);
MODULE_PARM_DESC(kgdboc, "<serial_device>[,baud]");
MODULE_DESCRIPTION("KGDB Console TTY Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Canonical-kernel/Ubuntu-kernel | drivers/net/wireless/ath/ath9k/ar9003_mci.c | 1092 | 40939 | /*
* Copyright (c) 2008-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/export.h>
#include "hw.h"
#include "hw-ops.h"
#include "ar9003_phy.h"
#include "ar9003_mci.h"
/* Pulse the MCI RESET_REQ_WAKEUP field: assert, hold 1us, deassert. */
static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
{
        REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
                      AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1);
        udelay(1);
        REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
                      AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 0);
}
/*
 * ar9003_mci_wait_for_interrupt() - poll a raw MCI interrupt register.
 * @address: raw interrupt register to poll
 * @bit_position: bit(s) to wait for
 * @time_out: polling budget, consumed in 10us steps
 *
 * Once the bit is observed it is acknowledged by writing it back.  Bits
 * in AR_MCI_INTERRUPT_RX_MSG_RAW additionally require message-specific
 * handling (wake request, sleep/wake notifications) plus clearing the
 * summary RX_MSG bit in AR_MCI_INTERRUPT_RAW.
 *
 * Returns the remaining budget, or 0 on timeout (logged via ath_dbg).
 */
static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
                                         u32 bit_position, int time_out)
{
        struct ath_common *common = ath9k_hw_common(ah);

        while (time_out) {
                if (!(REG_READ(ah, address) & bit_position)) {
                        /* Not set yet: burn 10us of the budget and retry. */
                        udelay(10);
                        time_out -= 10;

                        if (time_out < 0)
                                break;
                        else
                                continue;
                }

                /* Bit observed: acknowledge it by writing it back. */
                REG_WRITE(ah, address, bit_position);

                if (address != AR_MCI_INTERRUPT_RX_MSG_RAW)
                        break;

                if (bit_position & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
                        ar9003_mci_reset_req_wakeup(ah);

                if (bit_position & (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
                                    AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
                        REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
                                  AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);

                /* Clear the summary RX_MSG interrupt as well. */
                REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_RX_MSG);
                break;
        }

        if (time_out <= 0) {
                ath_dbg(common, MCI,
                        "MCI Wait for Reg 0x%08x = 0x%08x timeout\n",
                        address, bit_position);
                ath_dbg(common, MCI,
                        "MCI INT_RAW = 0x%08x, RX_MSG_RAW = 0x%08x\n",
                        REG_READ(ah, AR_MCI_INTERRUPT_RAW),
                        REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
                time_out = 0;
        }

        return time_out;
}
static void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done)
{
u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00};
ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16,
wait_done, false);
udelay(5);
}
static void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done)
{
u32 payload = 0x00000000;
ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1,
wait_done, false);
}
static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done)
{
ar9003_mci_send_message(ah, MCI_REQ_WAKE, MCI_FLAG_DISABLE_TIMESTAMP,
NULL, 0, wait_done, false);
udelay(5);
}
static void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done)
{
ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP,
NULL, 0, wait_done, false);
}
static void ar9003_mci_send_lna_take(struct ath_hw *ah, bool wait_done)
{
u32 payload = 0x70000000;
ar9003_mci_send_message(ah, MCI_LNA_TAKE, 0, &payload, 1,
wait_done, false);
}
static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done)
{
ar9003_mci_send_message(ah, MCI_SYS_SLEEPING,
MCI_FLAG_DISABLE_TIMESTAMP,
NULL, 0, wait_done, false);
}
static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
bool wait_done)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 payload[4] = {0, 0, 0, 0};
if (mci->bt_version_known ||
(mci->bt_state == MCI_BT_SLEEP))
return;
MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
MCI_GPM_COEX_VERSION_QUERY);
ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
}
static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
bool wait_done)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 payload[4] = {0, 0, 0, 0};
MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
MCI_GPM_COEX_VERSION_RESPONSE);
*(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) =
mci->wlan_ver_major;
*(((u8 *)payload) + MCI_GPM_COEX_B_MINOR_VERSION) =
mci->wlan_ver_minor;
ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
}
/* Send the cached WLAN channel map to BT when an update is pending. */
static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
					       bool wait_done)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 *chan_map = &mci->wlan_channels[0];

	if (!mci->wlan_channels_update || (mci->bt_state == MCI_BT_SLEEP))
		return;

	MCI_GPM_SET_TYPE_OPCODE(chan_map, MCI_GPM_COEX_AGENT,
				MCI_GPM_COEX_WLAN_CHANNELS);
	ar9003_mci_send_message(ah, MCI_GPM, 0, chan_map, 16, wait_done, true);

	/* Invalidate the header so a stale map is not resent verbatim. */
	MCI_GPM_SET_TYPE_OPCODE(chan_map, 0xff, 0xff);
}
/* Query BT status bits given by query_type; no-op while BT sleeps. */
static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
						 bool wait_done, u8 query_type)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 msg[4] = {0};
	bool wants_btinfo;

	if (mci->bt_state == MCI_BT_SLEEP)
		return;

	wants_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
					MCI_GPM_COEX_QUERY_BT_TOPOLOGY));

	MCI_GPM_SET_TYPE_OPCODE(msg, MCI_GPM_COEX_AGENT,
				MCI_GPM_COEX_STATUS_QUERY);
	((u8 *)msg)[MCI_GPM_COEX_B_BT_BITMAP] = query_type;

	/*
	 * If bt_status_query message is not sent successfully,
	 * then need_flush_btinfo should be set again.
	 */
	if (!ar9003_mci_send_message(ah, MCI_GPM, 0, msg, 16,
				     wait_done, true)) {
		if (wants_btinfo)
			mci->need_flush_btinfo = true;
	}

	if (wants_btinfo)
		mci->query_bt = false;
}
/* Ask BT to halt or resume its GPM traffic. */
static void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
					     bool wait_done)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 msg[4] = {0};
	u8 *state = ((u8 *)msg) + MCI_GPM_COEX_B_HALT_STATE;

	MCI_GPM_SET_TYPE_OPCODE(msg, MCI_GPM_COEX_AGENT,
				MCI_GPM_COEX_HALT_BT_GPM);

	if (halt) {
		mci->query_bt = true;
		/* Send next unhalt no matter halt sent or not */
		mci->unhalt_bt_gpm = true;
		mci->need_flush_btinfo = true;
		*state = MCI_GPM_COEX_BT_GPM_HALT;
	} else {
		*state = MCI_GPM_COEX_BT_GPM_UNHALT;
	}

	ar9003_mci_send_message(ah, MCI_GPM, 0, msg, 16, wait_done, true);
}
/*
 * Bring the MCI link with BT up after a reset: reset BT's MCI RX,
 * wake BT, exchange SYS_WAKING, hand over the LNA on 2G, and clear
 * stale interrupt state.  MCI interrupts are masked for the whole
 * sequence and restored on exit.
 */
static void ar9003_mci_prep_interface(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 saved_mci_int_en;
	u32 mci_timeout = 150;

	/* Assume BT asleep until it answers the wake handshake below. */
	mci->bt_state = MCI_BT_SLEEP;

	/* Mask MCI interrupts and ack any stale raw status bits. */
	saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
	REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
	REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
		  REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
	REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
		  REG_READ(ah, AR_MCI_INTERRUPT_RAW));

	/* Clean BT's RX first, then request wake-up. */
	ar9003_mci_remote_reset(ah, true);
	ar9003_mci_send_req_wake(ah, true);

	/* BT stays marked asleep unless it answers with SYS_WAKING. */
	if (!ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
				  AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500))
		goto clear_redunt;

	mci->bt_state = MCI_BT_AWAKE;

	/*
	 * we don't need to send more remote_reset at this moment.
	 * If BT receive first remote_reset, then BT HW will
	 * be cleaned up and will be able to receive req_wake
	 * and BT HW will respond sys_waking.
	 * In this case, WLAN will receive BT's HW sys_waking.
	 * Otherwise, if BT SW missed initial remote_reset,
	 * that remote_reset will still clean up BT MCI RX,
	 * and the req_wake will wake BT up,
	 * and BT SW will respond this req_wake with a remote_reset and
	 * sys_waking. In this case, WLAN will receive BT's SW
	 * sys_waking. In either case, BT's RX is cleaned up. So we
	 * don't need to reply BT's remote_reset now, if any.
	 * Similarly, if in any case, WLAN can receive BT's sys_waking,
	 * that means WLAN's RX is also fine.
	 */
	ar9003_mci_send_sys_waking(ah, true);
	udelay(10);

	/*
	 * Set BT priority interrupt value to be 0xff to
	 * avoid having too many BT PRIORITY interrupts.
	 */
	REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
	REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
	REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
	REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
	REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);

	/*
	 * A contention reset will be received after send out
	 * sys_waking. Also BT priority interrupt bits will be set.
	 * Clear those bits before the next step.
	 */
	REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
		  AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
	REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_BT_PRI);

	/* On 2G the LNA is shared: offer it to BT. */
	if (mci->is_2g) {
		ar9003_mci_send_lna_transfer(ah, true);
		udelay(5);
	}

	/* Only verify the LNA handover when no band change is pending. */
	if ((mci->is_2g && !mci->update_2g5g)) {
		if (ar9003_mci_wait_for_interrupt(ah,
				  AR_MCI_INTERRUPT_RX_MSG_RAW,
				  AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
				  mci_timeout))
			ath_dbg(common, MCI,
				"MCI WLAN has control over the LNA & BT obeys it\n");
		else
			ath_dbg(common, MCI,
				"MCI BT didn't respond to LNA_TRANS\n");
	}

clear_redunt:
	/* Clear the extra redundant SYS_WAKING from BT */
	if ((mci->bt_state == MCI_BT_AWAKE) &&
	    (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
			    AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
	    (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
			    AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
		REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
			  AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING);
		REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
			  AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
	}

	/* Restore the interrupt mask saved on entry. */
	REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
}
/* Prepare MCI for full sleep: halt BT GPM (if needed) and drop ready. */
void ar9003_mci_set_full_sleep(struct ath_hw *ah)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;

	if (ar9003_mci_state(ah, MCI_STATE_ENABLE) &&
	    mci->bt_state != MCI_BT_SLEEP && !mci->halted_bt_gpm)
		ar9003_mci_send_coex_halt_bt_gpm(ah, true, true);

	mci->ready = false;
}
/* Mask both the general MCI and the RX-message interrupt sources. */
static void ar9003_mci_disable_interrupt(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
	REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
}
/* Enable the default MCI and RX-message interrupt masks. */
static void ar9003_mci_enable_interrupt(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT);
	REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
		  AR_MCI_INTERRUPT_RX_MSG_DEFAULT);
}
/* True only when every requested RX-message raw interrupt bit is set. */
static bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints)
{
	u32 raw = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);

	return (raw & ints) == ints;
}
/*
 * Hand the interrupt status accumulated by ar9003_mci_get_isr() to the
 * caller, then reset the accumulators for the next ISR pass.
 */
void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
			      u32 *rx_msg_intr)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;

	*raw_intr = mci->raw_intr;
	*rx_msg_intr = mci->rx_msg_intr;

	/* Clean int bits after the values are read. */
	mci->raw_intr = 0;
	mci->rx_msg_intr = 0;
}
EXPORT_SYMBOL(ar9003_mci_get_interrupt);
/* Read, accumulate and ack the raw MCI interrupt status registers. */
void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 raw_intr, rx_msg_intr;

	rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
	raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);

	/* 0xdeadbeef reads mean the registers are inaccessible; drop it. */
	if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef)) {
		ath_dbg(common, MCI,
			"MCI gets 0xdeadbeef during int processing\n");
		return;
	}

	/* Accumulate for ar9003_mci_get_interrupt() to collect later. */
	mci->rx_msg_intr |= rx_msg_intr;
	mci->raw_intr |= raw_intr;
	*masked |= ATH9K_INT_MCI;

	if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
		mci->cont_status = REG_READ(ah, AR_MCI_CONT_STATUS);

	/* Ack exactly the bits we consumed. */
	REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
	REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
}
/* Record the current band and latch a pending update on any change. */
static void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;

	if (!mci->update_2g5g && mci->is_2g != is_2g)
		mci->update_2g5g = true;

	mci->is_2g = is_2g;
}
/* Check whether the GPM slot at msg_index holds a real message. */
static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 *msg;

	if (msg_index == MCI_GPM_INVALID)
		return false;

	/* Each GPM slot is 16 bytes wide. */
	msg = (u32 *)(mci->gpm_buf + (msg_index << 4));

	/* The reserved pattern marks an empty/recycled slot. */
	return MCI_GPM_TYPE(msg) != MCI_GPM_RSVD_PATTERN;
}
/*
 * Route MCI/BT debug signals onto GPIO pins for observation, chosen by
 * the ATH_MCI_CONFIG_MCI_OBS_* bits in mci->config, then program the
 * diagnostic/observation bus muxes.  No-op when no mode is configured.
 */
static void ar9003_mci_observation_set_up(struct ath_hw *ah)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;

	if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
		/* Raw MCI bus: WLAN/BT data and clock lines. */
		ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
		ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
		ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
		ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
	} else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
		/* TX/RX activity of both WLAN and BT. */
		ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
		ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
		ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
		ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
		ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
	} else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
		/* BT-side TX/RX plus the BT MCI lines. */
		ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
		ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
		ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
		ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
	} else
		return;

	/* NOTE(review): presumably JTAG shares these pins and must be
	 * disabled for the signals to reach the GPIOs — confirm. */
	REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);

	REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_DS_JTAG_DISABLE, 1);
	REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_WLAN_UART_INTF_EN, 0);
	REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL, ATH_MCI_CONFIG_MCI_OBS_GPIO);

	/* Select the MAC/BB observation bus taps. */
	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0);
	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1);
	REG_WRITE(ah, AR_OBS, 0x4b);
	REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03);
	REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01);
	REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02);
	REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03);
	REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS,
		      AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07);
}
/*
 * Send a BT_UPDATE_FLAGS GPM carrying opcode (set/clear/read) and the
 * 32-bit flag word, stored byte-by-byte least-significant first so the
 * in-memory layout is host-endian independent.
 */
static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done,
					  u8 opcode, u32 bt_flags)
{
	u32 msg[4] = {0};
	u8 *b = (u8 *)msg;
	int i;

	MCI_GPM_SET_TYPE_OPCODE(msg, MCI_GPM_COEX_AGENT,
				MCI_GPM_COEX_BT_UPDATE_FLAGS);

	b[MCI_GPM_COEX_B_BT_FLAGS_OP] = opcode;
	for (i = 0; i < 4; i++)
		b[MCI_GPM_COEX_W_BT_FLAGS + i] = (bt_flags >> (8 * i)) & 0xFF;

	return ar9003_mci_send_message(ah, MCI_GPM, 0, msg, 16,
				       wait_done, true);
}
/*
 * Refresh the cached BT sleep state from hardware; if BT is awake,
 * replay any pending version query, channel map and unhalt messages.
 */
static void ar9003_mci_sync_bt_state(struct ath_hw *ah)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;

	mci->bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP);

	if (mci->bt_state == MCI_BT_SLEEP)
		return;

	ar9003_mci_send_coex_version_query(ah, true);
	ar9003_mci_send_coex_wlan_channels(ah, true);

	if (mci->unhalt_bt_gpm)
		ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
}
/* Re-validate BT state and band after MCI becomes ready. */
void ar9003_mci_check_bt(struct ath_hw *ah)
{
	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;

	if (!mci_hw->ready)
		return;

	/*
	 * check BT state again to make
	 * sure it's not changed.
	 */
	ar9003_mci_sync_bt_state(ah);
	ar9003_mci_2g5g_switch(ah, true);

	/* Awake BT with a query outstanding: cached profile info is stale. */
	if (mci_hw->bt_state == MCI_BT_AWAKE && mci_hw->query_bt)
		mci_hw->need_flush_btinfo = true;
}
/*
 * Handle a coex-agent GPM message that arrived while we were waiting
 * for a different one: answer queries and update cached BT info.
 */
static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type,
					 u8 gpm_opcode, u32 *p_gpm)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u8 *data = (u8 *)p_gpm;

	if (gpm_type != MCI_GPM_COEX_AGENT)
		return;

	switch (gpm_opcode) {
	case MCI_GPM_COEX_VERSION_QUERY:
		ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n");
		ar9003_mci_send_coex_version_response(ah, true);
		break;
	case MCI_GPM_COEX_VERSION_RESPONSE:
		ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n");
		/* Cache BT's coex version so we stop querying for it. */
		mci->bt_ver_major = data[MCI_GPM_COEX_B_MAJOR_VERSION];
		mci->bt_ver_minor = data[MCI_GPM_COEX_B_MINOR_VERSION];
		mci->bt_version_known = true;
		ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n",
			mci->bt_ver_major, mci->bt_ver_minor);
		break;
	case MCI_GPM_COEX_STATUS_QUERY:
		ath_dbg(common, MCI,
			"MCI Recv GPM COEX Status Query = 0x%02X\n",
			data[MCI_GPM_COEX_B_WLAN_BITMAP]);
		mci->wlan_channels_update = true;
		ar9003_mci_send_coex_wlan_channels(ah, true);
		break;
	case MCI_GPM_COEX_BT_PROFILE_INFO:
		mci->query_bt = true;
		ath_dbg(common, MCI, "MCI Recv GPM COEX BT_Profile_Info\n");
		break;
	case MCI_GPM_COEX_BT_STATUS_UPDATE:
		mci->query_bt = true;
		ath_dbg(common, MCI,
			"MCI Recv GPM COEX BT_Status_Update SEQ=%d (drop&query)\n",
			*(p_gpm + 3));
		break;
	default:
		break;
	}
}
/*
 * Wait up to time_out for a GPM message matching gpm_type/gpm_opcode,
 * processing (and recycling) any other messages that arrive meanwhile.
 * Returns the remaining time (non-zero) on success, 0 on timeout.
 */
static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
				   u8 gpm_opcode, int time_out)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 *p_gpm = NULL, mismatch = 0, more_data;
	u32 offset;
	u8 recv_type = 0, recv_opcode = 0;
	bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);

	/* With a timeout we block on the GPM interrupt first;
	 * with time_out == 0 we only drain already-queued messages. */
	more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE;

	while (time_out > 0) {
		/* Return the previous slot to the pool before fetching. */
		if (p_gpm) {
			MCI_GPM_RECYCLE(p_gpm);
			p_gpm = NULL;
		}

		/* Only wait on the interrupt when the queue ran dry. */
		if (more_data != MCI_GPM_MORE)
			time_out = ar9003_mci_wait_for_interrupt(ah,
					AR_MCI_INTERRUPT_RX_MSG_RAW,
					AR_MCI_INTERRUPT_RX_MSG_GPM,
					time_out);

		if (!time_out)
			break;

		offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);

		if (offset == MCI_GPM_INVALID)
			continue;

		p_gpm = (u32 *) (mci->gpm_buf + offset);
		recv_type = MCI_GPM_TYPE(p_gpm);
		recv_opcode = MCI_GPM_OPCODE(p_gpm);

		if (MCI_GPM_IS_CAL_TYPE(recv_type)) {
			if (recv_type == gpm_type) {
				/* We were retargeted to BT_CAL_DONE (see
				 * below) — keep waiting for the original
				 * BT_CAL_GRANT afterwards. */
				if ((gpm_type == MCI_GPM_BT_CAL_DONE) &&
				    !b_is_bt_cal_done) {
					gpm_type = MCI_GPM_BT_CAL_GRANT;
					continue;
				}
				break;
			}
		} else if ((recv_type == gpm_type) &&
			   (recv_opcode == gpm_opcode))
			break;

		/*
		 * check if it's cal_grant
		 *
		 * When we're waiting for cal_grant in reset routine,
		 * it's possible that BT sends out cal_request at the
		 * same time. Since BT's calibration doesn't happen
		 * that often, we'll let BT completes calibration then
		 * we continue to wait for cal_grant from BT.
		 * Orginal: Wait BT_CAL_GRANT.
		 * New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT->wait
		 * BT_CAL_DONE -> Wait BT_CAL_GRANT.
		 */

		if ((gpm_type == MCI_GPM_BT_CAL_GRANT) &&
		    (recv_type == MCI_GPM_BT_CAL_REQ)) {
			u32 payload[4] = {0, 0, 0, 0};

			gpm_type = MCI_GPM_BT_CAL_DONE;
			MCI_GPM_SET_CAL_TYPE(payload,
					     MCI_GPM_WLAN_CAL_GRANT);
			ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
						false, false);
			continue;
		} else {
			/* Unrelated message: handle it and keep waiting. */
			ath_dbg(common, MCI, "MCI GPM subtype not match 0x%x\n",
				*(p_gpm + 1));
			mismatch++;
			ar9003_mci_process_gpm_extra(ah, recv_type,
						     recv_opcode, p_gpm);
		}
	}

	if (p_gpm) {
		MCI_GPM_RECYCLE(p_gpm);
		p_gpm = NULL;
	}

	if (time_out <= 0)
		time_out = 0;

	/* Drain any remaining queued messages before returning. */
	while (more_data == MCI_GPM_MORE) {
		offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
		if (offset == MCI_GPM_INVALID)
			break;

		p_gpm = (u32 *) (mci->gpm_buf + offset);
		recv_type = MCI_GPM_TYPE(p_gpm);
		recv_opcode = MCI_GPM_OPCODE(p_gpm);

		if (!MCI_GPM_IS_CAL_TYPE(recv_type))
			ar9003_mci_process_gpm_extra(ah, recv_type,
						     recv_opcode, p_gpm);

		MCI_GPM_RECYCLE(p_gpm);
	}

	return time_out;
}
/*
 * Begin a WLAN reset while BT calibration is pending: grant the cal,
 * wait (25ms) for BT to finish, and mark BT awake.  Returns true only
 * when BT was actually in the CAL_START state.
 */
bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
	u32 payload[4] = {0, 0, 0, 0};

	ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan));

	if (mci_hw->bt_state != MCI_BT_CAL_START)
		return false;

	mci_hw->bt_state = MCI_BT_CAL;

	/*
	 * MCI FIX: disable mci interrupt here. This is to avoid
	 * SW_MSG_DONE or RX_MSG bits to trigger MCI_INT and
	 * lead to mci_intr reentry.
	 */
	ar9003_mci_disable_interrupt(ah);

	MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);
	ar9003_mci_send_message(ah, MCI_GPM, 0, payload,
				16, true, false);

	/* Wait BT calibration to be completed for 25ms */

	if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE,
				    0, 25000))
		ath_dbg(common, MCI, "MCI BT_CAL_DONE received\n");
	else
		ath_dbg(common, MCI,
			"MCI BT_CAL_DONE not received\n");

	mci_hw->bt_state = MCI_BT_AWAKE;
	/* MCI FIX: enable mci interrupt here */
	ar9003_mci_enable_interrupt(ah);

	return true;
}
/*
 * Finish a WLAN reset.  If BT woke up while we calibrated on 2GHz,
 * redo the wake handshake and recalibrate; otherwise just re-enable
 * MCI interrupts.  Returns 0 on success, -EIO if recalibration fails.
 */
int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
			 struct ath9k_hw_cal_data *caldata)
{
	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;

	if (!mci_hw->ready)
		return 0;

	/* Only the "BT was asleep and tried to wake" case needs work. */
	if (!IS_CHAN_2GHZ(chan) || (mci_hw->bt_state != MCI_BT_SLEEP))
		goto exit;

	if (!ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) &&
	    !ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE))
		goto exit;

	/*
	 * BT is sleeping. Check if BT wakes up during
	 * WLAN calibration. If BT wakes up during
	 * WLAN calibration, need to go through all
	 * message exchanges again and recal.
	 */
	REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
		  (AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
		   AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE));

	/* Redo the wake handshake and LNA handover. */
	ar9003_mci_remote_reset(ah, true);
	ar9003_mci_send_sys_waking(ah, true);
	udelay(1);

	if (IS_CHAN_2GHZ(chan))
		ar9003_mci_send_lna_transfer(ah, true);

	mci_hw->bt_state = MCI_BT_AWAKE;

	/* Recalibrate with gain/DC/IQ cal gated off, then restore it. */
	REG_CLR_BIT(ah, AR_PHY_TIMING4,
		    1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);

	if (caldata) {
		/* Invalidate cal results that the rerun will redo. */
		clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
		clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
		clear_bit(RTT_DONE, &caldata->cal_flags);
	}

	if (!ath9k_hw_init_cal(ah, chan))
		return -EIO;

	REG_SET_BIT(ah, AR_PHY_TIMING4,
		    1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);

exit:
	ar9003_mci_enable_interrupt(ah);
	return 0;
}
/*
 * Quiet the BT side before tearing MCI down: block all outgoing MCI
 * messages, reclaim the LNA and announce that WLAN is going to sleep.
 */
static void ar9003_mci_mute_bt(struct ath_hw *ah)
{
	/* disable all MCI messages */
	REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
	REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);

	/* wait pending HW messages to flush out */
	udelay(10);

	/*
	 * Send LNA_TAKE and SYS_SLEEPING when
	 * 1. reset not after resuming from full sleep
	 * 2. before reset MCI RX, to quiet BT and avoid MCI RX misalignment
	 */
	ar9003_mci_send_lna_take(ah, true);
	udelay(5);

	ar9003_mci_send_sys_sleeping(ah, true);
}
/* Enable or disable one-step-look-ahead (OSLA) BT coex scheduling. */
static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 aggr_thresh;

	if (!enable) {
		REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
			    AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
		return;
	}

	REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2, AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
	REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
		      AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);

	if (AR_SREV_9565(ah))
		REG_RMW_FIELD(ah, AR_MCI_MISC, AR_MCI_MISC_HW_FIX_EN, 1);

	/* Apply the configured aggregation threshold, if not disabled. */
	if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
		aggr_thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
			      AR_BTCOEX_CTRL_AGGR_THRESH, aggr_thresh);
		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
			      AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 1);
	} else {
		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
			      AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 0);
	}

	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
		      AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
}
/*
 * Full MCI hardware reset: program DMA buffers, BTCOEX control, OSLA,
 * then reset the MCI TX/RX paths and re-run the BT wake handshake.
 * Returns 0 on success, -ENOMEM when buffers are missing, -EINVAL when
 * the BTCOEX registers are inaccessible.
 */
int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
		     bool is_full_sleep)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 regval, i;

	ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n",
		is_full_sleep, is_2g);

	if (!mci->gpm_addr && !mci->sched_addr) {
		ath_err(common, "MCI GPM and schedule buffers are not allocated\n");
		return -ENOMEM;
	}

	/* 0xdeadbeef read means the register space is inaccessible. */
	if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
		ath_err(common, "BTCOEX control register is dead\n");
		return -EINVAL;
	}

	/* Program MCI DMA related registers */
	REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr);
	REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len);
	REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr);

	/*
	 * To avoid MCI state machine be affected by incoming remote MCI msgs,
	 * MCI mode will be enabled later, right before reset the MCI TX and RX.
	 */
	regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
		 SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
		 SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
		 SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
		 SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
		 SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
	/* Chain configuration differs between AR9565 (1x1) and others. */
	if (AR_SREV_9565(ah)) {
		regval |= SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
			  SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
			      AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1);
	} else {
		regval |= SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
			  SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
	}

	REG_WRITE(ah, AR_BTCOEX_CTRL, regval);

	/* OSLA only applies on 2G and when not configured off. */
	if (is_2g && !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
		ar9003_mci_osla_setup(ah, true);
	else
		ar9003_mci_osla_setup(ah, false);

	REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
		    AR_BTCOEX_CTRL_SPDT_ENABLE);
	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
		      AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);

	REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 0);
	REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);

	/* Set the time out to 3.125ms (5 BT slots) */
	REG_RMW_FIELD(ah, AR_BTCOEX_WL_LNA, AR_BTCOEX_WL_LNA_TIMEOUT, 0x3D090);

	/* concurrent tx priority */
	if (mci->config & ATH_MCI_CONFIG_CONCUR_TX) {
		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
			      AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE, 0);
		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
			      AR_BTCOEX_CTRL2_TXPWR_THRESH, 0x7f);
		REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
			      AR_BTCOEX_CTRL_REDUCE_TXPWR, 0);
		for (i = 0; i < 8; i++)
			REG_WRITE(ah, AR_BTCOEX_MAX_TXPWR(i), 0x7f7f7f7f);
	}

	regval = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
	REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, regval);
	/* Enable MCI mode only now, just before the TX/RX resets. */
	REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);

	/* Resetting the Rx and Tx paths of MCI */
	regval = REG_READ(ah, AR_MCI_COMMAND2);
	regval |= SM(1, AR_MCI_COMMAND2_RESET_TX);
	REG_WRITE(ah, AR_MCI_COMMAND2, regval);

	udelay(1);

	regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX);
	REG_WRITE(ah, AR_MCI_COMMAND2, regval);

	if (is_full_sleep) {
		ar9003_mci_mute_bt(ah);
		udelay(100);
	}

	/* Check pending GPM msg before MCI Reset Rx */
	ar9003_mci_check_gpm_offset(ah);

	regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
	REG_WRITE(ah, AR_MCI_COMMAND2, regval);

	udelay(1);

	regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
	REG_WRITE(ah, AR_MCI_COMMAND2, regval);

	/* Re-sync the cached GPM write pointer with hardware. */
	ar9003_mci_get_next_gpm_offset(ah, true, NULL);

	REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
		  (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
		   SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));

	REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
		    AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);

	ar9003_mci_observation_set_up(ah);

	mci->ready = true;
	ar9003_mci_prep_interface(ah);

	if (AR_SREV_9565(ah))
		REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
			      AR_MCI_DBG_CNT_CTRL_ENABLE, 0);

	if (en_int)
		ar9003_mci_enable_interrupt(ah);

	return 0;
}
/* Shut the MCI link down; keep BTCOEX alive only for full-sleep save. */
void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
{
	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;

	ar9003_mci_disable_interrupt(ah);

	if (mci_hw->ready && !save_fullsleep) {
		ar9003_mci_mute_bt(ah);
		udelay(20);
		REG_WRITE(ah, AR_BTCOEX_CTRL, 0);
	}

	mci_hw->bt_state = MCI_BT_SLEEP;
	mci_hw->ready = false;
}
/*
 * Push the pending 2G/5G coex flag update to BT: clear the old band's
 * flags, then set the new band's.  No-op when no update is pending or
 * BT is asleep.
 *
 * Fix: the local `new_flags` (MCI_2G_FLAGS / MCI_5G_FLAGS) was assigned
 * in both branches but never read — dead code removed.
 */
static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 to_set, to_clear;

	if (!mci->update_2g5g || (mci->bt_state == MCI_BT_SLEEP))
		return;

	/* Select the masks matching the current band. */
	if (mci->is_2g) {
		to_clear = MCI_2G_FLAGS_CLEAR_MASK;
		to_set = MCI_2G_FLAGS_SET_MASK;
	} else {
		to_clear = MCI_5G_FLAGS_CLEAR_MASK;
		to_set = MCI_5G_FLAGS_SET_MASK;
	}

	/* Clear first, then set; skip empty masks. */
	if (to_clear)
		ar9003_mci_send_coex_bt_flags(ah, wait_done,
					      MCI_GPM_COEX_BT_FLAGS_CLEAR,
					      to_clear);
	if (to_set)
		ar9003_mci_send_coex_bt_flags(ah, wait_done,
					      MCI_GPM_COEX_BT_FLAGS_SET,
					      to_set);
}
/*
 * Track whether a coex-agent GPM needs to be re-sent later: queue=true
 * marks it pending (send failed), queue=false marks it delivered.
 */
static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
					u32 *payload, bool queue)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u8 halt_state;

	/* Only coex-agent GPM messages are tracked. */
	if (header != MCI_GPM || MCI_GPM_TYPE(payload) != MCI_GPM_COEX_AGENT)
		return;

	switch (MCI_GPM_OPCODE(payload)) {
	case MCI_GPM_COEX_BT_UPDATE_FLAGS:
		/* Flag reads need no replay; only SET/CLEAR updates do. */
		if (((u8 *)payload)[MCI_GPM_COEX_B_BT_FLAGS_OP] !=
		    MCI_GPM_COEX_BT_FLAGS_READ)
			mci->update_2g5g = queue;
		break;
	case MCI_GPM_COEX_WLAN_CHANNELS:
		mci->wlan_channels_update = queue;
		break;
	case MCI_GPM_COEX_HALT_BT_GPM:
		halt_state = ((u8 *)payload)[MCI_GPM_COEX_B_HALT_STATE];

		if (halt_state == MCI_GPM_COEX_BT_GPM_UNHALT) {
			mci->unhalt_bt_gpm = queue;
			if (!queue)
				mci->halted_bt_gpm = false;
		}

		if (halt_state == MCI_GPM_COEX_BT_GPM_HALT)
			mci->halted_bt_gpm = !queue;
		break;
	default:
		break;
	}
}
/*
 * Apply the 2G/5G coex configuration for the current band: on 2G the
 * LNA/SPDT are shared with BT and OSLA may run; on 5G WLAN keeps them.
 * Runs only when an update is pending, unless force is set.
 */
void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;

	if (!mci->update_2g5g && !force)
		return;

	if (mci->is_2g) {
		/* 2G: tell BT the new flags, then hand the LNA over. */
		ar9003_mci_send_2g5g_status(ah, true);

		ar9003_mci_send_lna_transfer(ah, true);
		udelay(5);

		REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
			    AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
		REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
			    AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);

		if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
			ar9003_mci_osla_setup(ah, true);

		if (AR_SREV_9462(ah))
			REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
	} else {
		/* 5G: reclaim the LNA first, then report the new flags. */
		ar9003_mci_send_lna_take(ah, true);
		udelay(5);

		REG_SET_BIT(ah, AR_MCI_TX_CTRL,
			    AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
		REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
			    AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);

		ar9003_mci_osla_setup(ah, false);
		ar9003_mci_send_2g5g_status(ah, true);
	}
}
/*
 * Send one MCI message (header + up to 16 payload bytes).  When
 * wait_done is set, MCI interrupts are masked and the function blocks
 * (up to 500us) for SW_MSG_DONE.  Unsendable or failed coex-agent GPMs
 * are queued for a later retry via ar9003_mci_queue_unsent_gpm().
 * Returns true when the message was handed to hardware successfully.
 */
bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
			     u32 *payload, u8 len, bool wait_done,
			     bool check_bt)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	bool msg_sent = false;
	u32 regval;
	u32 saved_mci_int_en;
	int i;

	saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
	regval = REG_READ(ah, AR_BTCOEX_CTRL);

	/* Refuse to send when MCI is down or (optionally) BT sleeps. */
	if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) {
		ath_dbg(common, MCI,
			"MCI Not sending 0x%x. MCI is not enabled. full_sleep = %d\n",
			header, (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0);
		ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
		return false;
	} else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) {
		ath_dbg(common, MCI,
			"MCI Don't send message 0x%x. BT is in sleep state\n",
			header);
		ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
		return false;
	}

	if (wait_done)
		REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);

	/* Need to clear SW_MSG_DONE raw bit before wait */

	REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
		  (AR_MCI_INTERRUPT_SW_MSG_DONE |
		   AR_MCI_INTERRUPT_MSG_FAIL_MASK));

	/* Load the payload words into the TX registers. */
	if (payload) {
		for (i = 0; (i * 4) < len; i++)
			REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4),
				  *(payload + i));
	}

	/* Writing COMMAND0 triggers the transmit. */
	REG_WRITE(ah, AR_MCI_COMMAND0,
		  (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP),
		      AR_MCI_COMMAND0_DISABLE_TIMESTAMP) |
		   SM(len, AR_MCI_COMMAND0_LEN) |
		   SM(header, AR_MCI_COMMAND0_HEADER)));

	if (wait_done &&
	    !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW,
					AR_MCI_INTERRUPT_SW_MSG_DONE, 500)))
		ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
	else {
		/* Delivered: clear any queued-for-retry marker. */
		ar9003_mci_queue_unsent_gpm(ah, header, payload, false);
		msg_sent = true;
	}

	if (wait_done)
		REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);

	return msg_sent;
}
EXPORT_SYMBOL(ar9003_mci_send_message);
/*
 * Request a calibration slot from BT and wait (50ms) for the grant;
 * without a grant the calibration result is marked non-reusable.
 */
void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
	u32 msg[4] = {0};

	if ((mci_hw->bt_state != MCI_BT_AWAKE) ||
	    (mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL))
		return;

	MCI_GPM_SET_CAL_TYPE(msg, MCI_GPM_WLAN_CAL_REQ);
	msg[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
	ar9003_mci_send_message(ah, MCI_GPM, 0, msg, 16, true, false);

	if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000)) {
		ath_dbg(common, MCI, "MCI BT_CAL_GRANT received\n");
	} else {
		*is_reusable = false;
		ath_dbg(common, MCI, "MCI BT_CAL_GRANT not received\n");
	}
}
/* Notify BT that the WLAN calibration slot is finished. */
void ar9003_mci_init_cal_done(struct ath_hw *ah)
{
	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
	u32 msg[4] = {0};

	if ((mci_hw->bt_state != MCI_BT_AWAKE) ||
	    (mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL))
		return;

	MCI_GPM_SET_CAL_TYPE(msg, MCI_GPM_WLAN_CAL_DONE);
	msg[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
	ar9003_mci_send_message(ah, MCI_GPM, 0, msg, 16, true, false);
}
/* Record the GPM/schedule DMA buffers, then perform a full MCI reset. */
int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
		     u16 len, u32 sched_addr)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;

	mci->gpm_addr = gpm_addr;
	mci->gpm_buf = gpm_buf;
	mci->gpm_len = len;
	mci->sched_addr = sched_addr;

	return ar9003_mci_reset(ah, true, true, true);
}
EXPORT_SYMBOL(ar9003_mci_setup);
/* Tear MCI down: disable BTCOEX entirely and mask MCI interrupts. */
void ar9003_mci_cleanup(struct ath_hw *ah)
{
	/* Turn off MCI and Jupiter mode. */
	REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00);
	ar9003_mci_disable_interrupt(ah);
}
EXPORT_SYMBOL(ar9003_mci_cleanup);
/*
 * Query or manipulate MCI state.  Query-type cases return a value
 * (0 otherwise); action-type cases trigger message exchanges or
 * recovery sequences as a side effect.
 */
u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 value = 0, tsf;
	u8 query_type;

	switch (state_type) {
	case MCI_STATE_ENABLE:
		/* Non-zero iff MCI mode is enabled and readable. */
		if (mci->ready) {
			value = REG_READ(ah, AR_BTCOEX_CTRL);

			if ((value == 0xdeadbeef) || (value == 0xffffffff))
				value = 0;
		}
		value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
		break;
	case MCI_STATE_LAST_SCHD_MSG_OFFSET:
		value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
				    AR_MCI_RX_LAST_SCHD_MSG_INDEX);
		/* Make it in bytes */
		value <<= 4;
		break;
	case MCI_STATE_REMOTE_SLEEP:
		/* Map the remote-sleep status bit onto MCI_BT_* states. */
		value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
			   AR_MCI_RX_REMOTE_SLEEP) ?
			MCI_BT_SLEEP : MCI_BT_AWAKE;
		break;
	case MCI_STATE_SET_BT_AWAKE:
		/* Mark BT awake and replay pending coex messages. */
		mci->bt_state = MCI_BT_AWAKE;
		ar9003_mci_send_coex_version_query(ah, true);
		ar9003_mci_send_coex_wlan_channels(ah, true);

		if (mci->unhalt_bt_gpm)
			ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);

		ar9003_mci_2g5g_switch(ah, false);
		break;
	case MCI_STATE_RESET_REQ_WAKE:
		ar9003_mci_reset_req_wakeup(ah);
		mci->update_2g5g = true;

		if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK) {
			/* Check if we still have control of the GPIOs */
			if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) &
			     ATH_MCI_CONFIG_MCI_OBS_GPIO) !=
			    ATH_MCI_CONFIG_MCI_OBS_GPIO) {
				ar9003_mci_observation_set_up(ah);
			}
		}
		break;
	case MCI_STATE_SEND_WLAN_COEX_VERSION:
		ar9003_mci_send_coex_version_response(ah, true);
		break;
	case MCI_STATE_SEND_VERSION_QUERY:
		ar9003_mci_send_coex_version_query(ah, true);
		break;
	case MCI_STATE_SEND_STATUS_QUERY:
		query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
		ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
		break;
	case MCI_STATE_RECOVER_RX:
		/* Rate-limit RX recovery to one per MCI_RECOVERY_DUR_TSF. */
		tsf = ath9k_hw_gettsf32(ah);

		if ((tsf - mci->last_recovery) <= MCI_RECOVERY_DUR_TSF) {
			ath_dbg(ath9k_hw_common(ah), MCI,
				"(MCI) ignore Rx recovery\n");
			break;
		}

		ath_dbg(ath9k_hw_common(ah), MCI, "(MCI) RECOVER RX\n");

		mci->last_recovery = tsf;
		ar9003_mci_prep_interface(ah);

		mci->query_bt = true;
		mci->need_flush_btinfo = true;

		ar9003_mci_send_coex_wlan_channels(ah, true);
		ar9003_mci_2g5g_switch(ah, false);
		break;
	case MCI_STATE_NEED_FTP_STOMP:
		value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
		break;
	case MCI_STATE_NEED_FLUSH_BT_INFO:
		/* Read-and-clear: the flush request is reported once. */
		value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
		mci->need_flush_btinfo = false;
		break;
	default:
		break;
	}

	return value;
}
EXPORT_SYMBOL(ar9003_mci_state);
/* Hand LNA and SPDT control over to BT and report 5G coex flags. */
void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;

	ath_dbg(common, MCI, "Give LNA and SPDT control to BT\n");

	ar9003_mci_send_lna_take(ah, true);
	udelay(50);

	REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);

	/* Report as 5G so send_2g5g_status() picks the 5G flag masks. */
	mci->is_2g = false;
	mci->update_2g5g = true;
	ar9003_mci_send_2g5g_status(ah, true);

	/* Force another 2g5g update at next scanning */
	mci->update_2g5g = true;
}
/*
 * Wake the MCI/BTCOEX block from power save.  Polls until the BTCOEX
 * and DIAG registers stop reading 0xdeadbeef (inaccessible), briefly
 * flips debug bits to sample LNA control and BT sleep state, then
 * pulses AR_BTCOEX_RC when BT sleeps while holding the LNA.
 */
void ar9003_mci_set_power_awake(struct ath_hw *ah)
{
	u32 btcoex_ctrl2, diag_sw;
	int i;
	u8 lna_ctrl, bt_sleep;

	for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
		btcoex_ctrl2 = REG_READ(ah, AR_BTCOEX_CTRL2);
		if (btcoex_ctrl2 != 0xdeadbeef)
			break;
		udelay(AH_TIME_QUANTUM);
	}
	/* NOTE(review): if all polls read 0xdeadbeef, the stale value is
	 * still written back below — relies on the poll succeeding. */
	REG_WRITE(ah, AR_BTCOEX_CTRL2, (btcoex_ctrl2 | BIT(23)));

	for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
		diag_sw = REG_READ(ah, AR_DIAG_SW);
		if (diag_sw != 0xdeadbeef)
			break;
		udelay(AH_TIME_QUANTUM);
	}
	REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18)));

	/* Sample LNA ownership and BT sleep status, then restore. */
	lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3;
	bt_sleep = MS(REG_READ(ah, AR_MCI_RX_STATUS), AR_MCI_RX_REMOTE_SLEEP);

	REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2);
	REG_WRITE(ah, AR_DIAG_SW, diag_sw);

	if (bt_sleep && (lna_ctrl == 2)) {
		/* Pulse the BTCOEX reset-control bit. */
		REG_SET_BIT(ah, AR_BTCOEX_RC, 0x1);
		REG_CLR_BIT(ah, AR_BTCOEX_RC, 0x1);
		udelay(50);
	}
}
/*
 * Verify the cached GPM write pointer still matches hardware; on a
 * mismatch, force a BT re-query and restart the ring from slot 0.
 */
void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 hw_ptr;

	/*
	 * This should only be called before "MAC Warm Reset" or "MCI Reset Rx".
	 */
	hw_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
	if (mci->gpm_idx == hw_ptr)
		return;

	ath_dbg(common, MCI, "GPM cached write pointer mismatch %d %d\n",
		mci->gpm_idx, hw_ptr);

	mci->query_bt = true;
	mci->need_flush_btinfo = true;
	mci->gpm_idx = 0;
}
/*
 * Advance the cached GPM read index and return the byte offset of the
 * next valid message in the ring, or MCI_GPM_INVALID when the ring is
 * empty.  With first=true, just re-syncs the cached index to the
 * hardware write pointer.  *more (if given) reports whether further
 * messages remain queued (MCI_GPM_MORE / MCI_GPM_NOMORE).
 */
u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 offset, more_gpm = 0, gpm_ptr;

	if (first) {
		gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);

		if (gpm_ptr >= mci->gpm_len)
			gpm_ptr = 0;

		mci->gpm_idx = gpm_ptr;
		return gpm_ptr;
	}

	/*
	 * This could be useful to avoid new GPM message interrupt which
	 * may lead to spurious interrupt after power sleep, or multiple
	 * entry of ath_mci_intr().
	 * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can
	 * alleviate this effect, but clearing GPM RX interrupt bit is
	 * safe, because whether this is called from hw or driver code
	 * there must be an interrupt bit set/triggered initially
	 */
	REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
		  AR_MCI_INTERRUPT_RX_MSG_GPM);

	gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
	offset = gpm_ptr;

	/* The last written slot is one behind the write pointer,
	 * wrapping around the ring. */
	if (!offset)
		offset = mci->gpm_len - 1;
	else if (offset >= mci->gpm_len) {
		if (offset != 0xFFFF)
			offset = 0;
	} else {
		offset--;
	}

	/* Empty ring: pointer invalid or nothing new since last read. */
	if ((offset == 0xFFFF) || (gpm_ptr == mci->gpm_idx)) {
		offset = MCI_GPM_INVALID;
		more_gpm = MCI_GPM_NOMORE;
		goto out;
	}
	for (;;) {
		u32 temp_index;

		/* skip reserved GPM if any */

		if (offset != mci->gpm_idx)
			more_gpm = MCI_GPM_MORE;
		else
			more_gpm = MCI_GPM_NOMORE;

		temp_index = mci->gpm_idx;

		if (temp_index >= mci->gpm_len)
			temp_index = 0;

		mci->gpm_idx++;

		if (mci->gpm_idx >= mci->gpm_len)
			mci->gpm_idx = 0;

		if (ar9003_mci_is_gpm_valid(ah, temp_index)) {
			offset = temp_index;
			break;
		}

		if (more_gpm == MCI_GPM_NOMORE) {
			offset = MCI_GPM_INVALID;
			break;
		}
	}

	/* Convert the slot index into a byte offset (16 bytes/slot). */
	if (offset != MCI_GPM_INVALID)
		offset <<= 4;
out:
	if (more)
		*more = more_gpm;

	return offset;
}
EXPORT_SYMBOL(ar9003_mci_get_next_gpm_offset);
/*
 * ar9003_mci_set_bt_version() - record the BT coex firmware version
 * reported over MCI and mark it as known.
 */
void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;

	mci->bt_version_known = true;
	mci->bt_ver_major = major;
	mci->bt_ver_minor = minor;

	ath_dbg(ath9k_hw_common(ah), MCI, "MCI BT version set: %d.%d\n",
		mci->bt_ver_major, mci->bt_ver_minor);
}
EXPORT_SYMBOL(ar9003_mci_set_bt_version);
/*
 * ar9003_mci_send_wlan_channels() - flag the WLAN channel map as dirty
 * and push it to the BT side over MCI.
 */
void ar9003_mci_send_wlan_channels(struct ath_hw *ah)
{
	ah->btcoex_hw.mci.wlan_channels_update = true;
	ar9003_mci_send_coex_wlan_channels(ah, true);
}
EXPORT_SYMBOL(ar9003_mci_send_wlan_channels);
/*
 * ar9003_mci_get_max_txpower() - TX power cap for concurrent WLAN/BT TX.
 *
 * Returns the BT-coex power limit for 2 GHz HT20/HT40 when concurrent
 * TX is enabled; otherwise returns (u16)-1, i.e. 0xFFFF, meaning "no
 * coex-imposed cap".
 */
u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode)
{
	if (ah->btcoex_hw.mci.concur_tx) {
		switch (ctlmode) {
		case CTL_2GHT20:
			return ATH_BTCOEX_HT20_MAX_TXPOWER;
		case CTL_2GHT40:
			return ATH_BTCOEX_HT40_MAX_TXPOWER;
		}
	}
	return -1;
}
| gpl-2.0 |
aznair/mptcp | drivers/input/touchscreen/gunze.c | 2116 | 4683 | /*
* Copyright (c) 2000-2001 Vojtech Pavlik
*/
/*
* Gunze AHL-51S touchscreen driver for Linux
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
#define DRIVER_DESC "Gunze AHL-51S touchscreen driver"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/*
* Definitions & global arrays.
*/
#define GUNZE_MAX_LENGTH 10
/*
* Per-touchscreen data.
*/
/* Per-touchscreen state, allocated in gunze_connect(). */
struct gunze {
struct input_dev *dev;	/* input device we report events through */
struct serio *serio;	/* serial port the touchscreen is attached to */
int idx;	/* number of bytes accumulated in data[] */
unsigned char data[GUNZE_MAX_LENGTH];	/* current packet, CR-terminated on the wire */
char phys[32];	/* physical path string, "<serio phys>/input0" */
};
/*
 * Parse one complete packet and report it as an input event.
 * Expected wire format (10 bytes): 'T' or 'R', 4 ASCII digits of X,
 * ',', 4 ASCII digits of Y — e.g. "T0123,0456". 'T' means touch,
 * 'R' release. Malformed packets are logged and dropped.
 */
static void gunze_process_packet(struct gunze* gunze)
{
	struct input_dev *dev = gunze->dev;
	unsigned long x, y;

	if (gunze->idx != GUNZE_MAX_LENGTH || gunze->data[5] != ',' ||
		(gunze->data[0] != 'T' && gunze->data[0] != 'R')) {
		printk(KERN_WARNING "gunze.c: bad packet: >%.*s<\n", GUNZE_MAX_LENGTH, gunze->data);
		return;
	}

	x = simple_strtoul(gunze->data + 1, NULL, 10);
	y = simple_strtoul(gunze->data + 6, NULL, 10);

	input_report_abs(dev, ABS_X, x);
	/* Hardware Y axis is inverted relative to input conventions. */
	input_report_abs(dev, ABS_Y, 1024 - y);
	input_report_key(dev, BTN_TOUCH, gunze->data[0] == 'T');
	input_sync(dev);
}
/*
 * Per-byte serio callback: accumulate bytes into the packet buffer and
 * hand a completed (CR-terminated) packet to gunze_process_packet().
 * Bytes beyond GUNZE_MAX_LENGTH are silently dropped until the next CR.
 */
static irqreturn_t gunze_interrupt(struct serio *serio,
		unsigned char data, unsigned int flags)
{
	struct gunze *gunze = serio_get_drvdata(serio);

	if (data != '\r') {
		if (gunze->idx < GUNZE_MAX_LENGTH)
			gunze->data[gunze->idx++] = data;
	} else {
		/* CR terminates a packet. */
		gunze_process_packet(gunze);
		gunze->idx = 0;
	}

	return IRQ_HANDLED;
}
/*
* gunze_disconnect() is the opposite of gunze_connect()
*/
/*
 * gunze_disconnect() - tear down a touchscreen when its serio port goes
 * away. Mirrors gunze_connect().
 *
 * An extra reference is taken on the input device around unregistration
 * so the final input_put_device() — not input_unregister_device() —
 * drops it, keeping the ordering with serio_close() safe.
 */
static void gunze_disconnect(struct serio *serio)
{
struct gunze *gunze = serio_get_drvdata(serio);
input_get_device(gunze->dev);
input_unregister_device(gunze->dev);
serio_close(serio);
serio_set_drvdata(serio, NULL);
input_put_device(gunze->dev);
kfree(gunze);
}
/*
* gunze_connect() is the routine that is called when someone adds a
* new serio device that supports Gunze protocol and registers it as
* an input device.
*/
/*
 * gunze_connect() - bind to a newly-registered serio port that speaks
 * the Gunze protocol, allocate per-device state and register an input
 * device for it.
 *
 * Returns 0 on success or a negative errno; on any failure all
 * partially-acquired resources are released via the goto chain.
 */
static int gunze_connect(struct serio *serio, struct serio_driver *drv)
{
	struct gunze *gunze;
	struct input_dev *input_dev;
	int err;

	gunze = kzalloc(sizeof(struct gunze), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!gunze || !input_dev) {
		err = -ENOMEM;
		goto fail1;
	}

	gunze->serio = serio;
	gunze->dev = input_dev;
	/*
	 * Bound the copy by the *destination* buffer; the original used
	 * sizeof(serio->phys), which only worked because the two buffers
	 * happen to be the same size.
	 */
	snprintf(gunze->phys, sizeof(gunze->phys), "%s/input0", serio->phys);

	input_dev->name = "Gunze AHL-51S TouchScreen";
	input_dev->phys = gunze->phys;
	input_dev->id.bustype = BUS_RS232;
	input_dev->id.vendor = SERIO_GUNZE;
	input_dev->id.product = 0x0051;
	input_dev->id.version = 0x0100;
	input_dev->dev.parent = &serio->dev;
	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
	input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
	/* Usable coordinate range of the panel; no fuzz, no flat zone. */
	input_set_abs_params(input_dev, ABS_X, 24, 1000, 0, 0);
	input_set_abs_params(input_dev, ABS_Y, 24, 1000, 0, 0);

	serio_set_drvdata(serio, gunze);

	err = serio_open(serio, drv);
	if (err)
		goto fail2;

	err = input_register_device(gunze->dev);
	if (err)
		goto fail3;

	return 0;

 fail3:	serio_close(serio);
 fail2:	serio_set_drvdata(serio, NULL);
 fail1:	input_free_device(input_dev);
	kfree(gunze);
	return err;
}
/*
* The serio driver structure.
*/
/* Match any RS232 serio port whose line discipline reports SERIO_GUNZE. */
static struct serio_device_id gunze_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_GUNZE,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{ 0 }	/* terminating entry */
};
MODULE_DEVICE_TABLE(serio, gunze_serio_ids);
/* Serio driver glue: per-byte interrupt handler plus connect/disconnect. */
static struct serio_driver gunze_drv = {
.driver = {
.name = "gunze",
},
.description = DRIVER_DESC,
.id_table = gunze_serio_ids,
.interrupt = gunze_interrupt,
.connect = gunze_connect,
.disconnect = gunze_disconnect,
};
module_serio_driver(gunze_drv);
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.