repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
mastero9017/hammerhead-5.0 | drivers/usb/host/pehci/host/otg.c | 4560 | 4452 | /*
* Copyright (C) ST-Ericsson AP Pte Ltd 2010
*
* ISP1763 Linux OTG Controller driver : host
*
* This program is free software; you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by the Free Software Foundation; version
* 2 of the License.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* This is a host controller driver file. OTG related events are handled here.
*
* Author : wired support <wired.support@stericsson.com>
*
*/
/* hub device connected to the root port */
struct usb_device *hubdev = NULL;
/* hub interrupt urb */
struct urb *huburb;
/* return the otg hub device from here */
struct usb_device *
phci_register_otg_device(struct isp1763_dev *dev)
{
printk("OTG dev %x %d\n",(u32) hubdev, hubdev->devnum);
if (hubdev && hubdev->devnum >= 0x2) {
return hubdev;
}
return NULL;
}
EXPORT_SYMBOL(phci_register_otg_device);
/*suspend the otg port(0)
* needed when port is switching
* from host to device
* */
int
phci_suspend_otg_port(struct isp1763_dev *dev, u32 command)
{
int status = 0;
hubdev->otgstate = USB_OTG_SUSPEND;
if (huburb->status == -EINPROGRESS) {
huburb->status = 0;
}
huburb->status = 0;
huburb->complete(huburb);
return status;
}
EXPORT_SYMBOL(phci_suspend_otg_port);
/*set the flag to enumerate the device*/
int
phci_enumerate_otg_port(struct isp1763_dev *dev, u32 command)
{
/*set the flag to enumerate */
/*connect change interrupt will happen from
* phci_intl_worker only
* */
hubdev->otgstate = USB_OTG_ENUMERATE;
if (huburb->status == -EINPROGRESS) {
huburb->status = 0;
}
/*complete the urb */
huburb->complete(huburb);
/*reset the otghub urb status */
huburb->status = -EINPROGRESS;
return 0;
}
EXPORT_SYMBOL(phci_enumerate_otg_port);
/*host controller resume sequence at otg port*/
int
phci_resume_otg_port(struct isp1763_dev *dev, u32 command)
{
printk("Resume is called\n");
hubdev->otgstate = USB_OTG_RESUME;
if (huburb->status == -EINPROGRESS) {
huburb->status = 0;
}
/*complete the urb */
huburb->complete(huburb);
/*reset the otghub urb status */
huburb->status = -EINPROGRESS;
return 0;
}
EXPORT_SYMBOL(phci_resume_otg_port);
/*host controller remote wakeup sequence at otg port*/
int
phci_remotewakeup(struct isp1763_dev *dev)
{
printk("phci_remotewakeup_otg_port is called\n");
hubdev->otgstate = USB_OTG_REMOTEWAKEUP;
if(huburb->status == -EINPROGRESS)
huburb->status = 0;
/*complete the urb*/
#if ((defined LINUX_269) || defined (LINUX_2611))
huburb->complete(huburb,NULL);
#else
huburb->complete(huburb);
#endif
/*reset the otghub urb status*/
huburb->status = -EINPROGRESS;
return 0;
}
EXPORT_SYMBOL(phci_remotewakeup);
/*host controller wakeup sequence at otg port*/
int
phci_resume_wakeup(struct isp1763_dev *dev)
{
printk("phci_wakeup_otg_port is called\n");
#if 0
hubdev->otgstate = USB_OTG_WAKEUP_ALL;
if(huburb->status == -EINPROGRESS)
#endif
huburb->status = 0;
/*complete the urb*/
#if ((defined LINUX_269) || defined (LINUX_2611))
huburb->complete(huburb,NULL);
#else
huburb->complete(huburb);
#endif
/*reset the otghub urb status*/
huburb->status = -EINPROGRESS;
return 0;
}
EXPORT_SYMBOL(phci_resume_wakeup);
struct isp1763_driver *host_driver;
struct isp1763_driver *device_driver;
void
pehci_delrhtimer(struct isp1763_dev *dev)
{
struct usb_hcd *usb_hcd =
container_of(huburb->dev->parent->bus, struct usb_hcd, self);
del_timer_sync(&usb_hcd->rh_timer);
del_timer(&usb_hcd->rh_timer);
}
EXPORT_SYMBOL(pehci_delrhtimer);
int
pehci_Deinitialize(struct isp1763_dev *dev)
{
dev -= 2;
if (dev->index == 0) {
if (dev->driver) {
if (dev->driver->powerdown) {
dev->driver->powerdown(dev);
}
}
}
return 0;
}
EXPORT_SYMBOL(pehci_Deinitialize);
int
pehci_Reinitialize(struct isp1763_dev *dev)
{
dev -= 2;
if (dev->index == 0) {
if(dev->driver->powerup){
dev->driver->powerup(dev);
}
}
return 0;
}
EXPORT_SYMBOL(pehci_Reinitialize);
| gpl-2.0 |
Tesla-Redux-Devices/kernel_lge_g3 | drivers/hwmon/emc2103.c | 4816 | 21123 | /*
* emc2103.c - Support for SMSC EMC2103
* Copyright (c) 2010 SMSC
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
/* Addresses scanned */
static const unsigned short normal_i2c[] = { 0x2E, I2C_CLIENT_END };
static const u8 REG_TEMP[4] = { 0x00, 0x02, 0x04, 0x06 };
static const u8 REG_TEMP_MIN[4] = { 0x3c, 0x38, 0x39, 0x3a };
static const u8 REG_TEMP_MAX[4] = { 0x34, 0x30, 0x31, 0x32 };
#define REG_CONF1 0x20
#define REG_TEMP_MAX_ALARM 0x24
#define REG_TEMP_MIN_ALARM 0x25
#define REG_FAN_CONF1 0x42
#define REG_FAN_TARGET_LO 0x4c
#define REG_FAN_TARGET_HI 0x4d
#define REG_FAN_TACH_HI 0x4e
#define REG_FAN_TACH_LO 0x4f
#define REG_PRODUCT_ID 0xfd
#define REG_MFG_ID 0xfe
/* equation 4 from datasheet: rpm = (3932160 * multiplier) / count */
#define FAN_RPM_FACTOR 3932160
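/*
* Worked example (illustrative numbers, not from the datasheet): with the
* default multiplier of 1, a tach count of 1966 gives
* rpm = (3932160 * 1) / 1966, roughly 2000 RPM; halving the count doubles
* the reported speed.
*/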
/*
* 2103-2 and 2103-4's 3rd temperature sensor can be connected to two diodes
* in anti-parallel mode, and in this configuration both can be read
* independently (so we have 4 temperature inputs). The device can't
* detect if it's connected in this mode, so we have to manually enable
* it. Default is to leave the device in the state it's already in (-1).
* This parameter allows APD mode to be optionally forced on or off
*/
static int apd = -1;
module_param(apd, bint, 0);
MODULE_PARM_DESC(apd, "Set to zero to disable anti-parallel diode mode");
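/*
* Example usage (hypothetical invocation): the parameter is load-time only,
* e.g. "modprobe emc2103 apd=0" to force anti-parallel diode mode off, or
* "emc2103.apd=1" on the kernel command line to force it on.
*/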
struct temperature {
s8 degrees;
u8 fraction; /* 0-7 multiples of 0.125 */
};
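/*
* Example encoding (illustrative values): degrees = 25, fraction = 5 means
* 25 + 5 * 0.125 = 25.625 degrees C, which show_temp() reports as
* 25625 millidegrees (25 * 1000 + 5 * 125).
*/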
struct emc2103_data {
struct device *hwmon_dev;
struct mutex update_lock;
bool valid; /* registers are valid */
bool fan_rpm_control;
int temp_count; /* num of temp sensors */
unsigned long last_updated; /* in jiffies */
struct temperature temp[4]; /* internal + 3 external */
s8 temp_min[4]; /* no fractional part */
s8 temp_max[4]; /* no fractional part */
u8 temp_min_alarm;
u8 temp_max_alarm;
u8 fan_multiplier;
u16 fan_tach;
u16 fan_target;
};
static int read_u8_from_i2c(struct i2c_client *client, u8 i2c_reg, u8 *output)
{
int status = i2c_smbus_read_byte_data(client, i2c_reg);
if (status < 0) {
dev_warn(&client->dev, "reg 0x%02x, err %d\n",
i2c_reg, status);
} else {
*output = status;
}
return status;
}
static void read_temp_from_i2c(struct i2c_client *client, u8 i2c_reg,
struct temperature *temp)
{
u8 degrees, fractional;
if (read_u8_from_i2c(client, i2c_reg, &degrees) < 0)
return;
if (read_u8_from_i2c(client, i2c_reg + 1, &fractional) < 0)
return;
temp->degrees = degrees;
temp->fraction = (fractional & 0xe0) >> 5;
}
static void read_fan_from_i2c(struct i2c_client *client, u16 *output,
u8 hi_addr, u8 lo_addr)
{
u8 high_byte, lo_byte;
if (read_u8_from_i2c(client, hi_addr, &high_byte) < 0)
return;
if (read_u8_from_i2c(client, lo_addr, &lo_byte) < 0)
return;
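/* 13-bit reading: the high register supplies bits 12..5 and the upper
* five bits of the low register supply bits 4..0 */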
*output = ((u16)high_byte << 5) | (lo_byte >> 3);
}
static void write_fan_target_to_i2c(struct i2c_client *client, u16 new_target)
{
u8 high_byte = (new_target & 0x1fe0) >> 5;
u8 low_byte = (new_target & 0x001f) << 3;
i2c_smbus_write_byte_data(client, REG_FAN_TARGET_LO, low_byte);
i2c_smbus_write_byte_data(client, REG_FAN_TARGET_HI, high_byte);
}
static void read_fan_config_from_i2c(struct i2c_client *client)
{
struct emc2103_data *data = i2c_get_clientdata(client);
u8 conf1;
if (read_u8_from_i2c(client, REG_FAN_CONF1, &conf1) < 0)
return;
data->fan_multiplier = 1 << ((conf1 & 0x60) >> 5);
data->fan_rpm_control = (conf1 & 0x80) != 0;
}
static struct emc2103_data *emc2103_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct emc2103_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
|| !data->valid) {
int i;
for (i = 0; i < data->temp_count; i++) {
read_temp_from_i2c(client, REG_TEMP[i], &data->temp[i]);
read_u8_from_i2c(client, REG_TEMP_MIN[i],
&data->temp_min[i]);
read_u8_from_i2c(client, REG_TEMP_MAX[i],
&data->temp_max[i]);
}
read_u8_from_i2c(client, REG_TEMP_MIN_ALARM,
&data->temp_min_alarm);
read_u8_from_i2c(client, REG_TEMP_MAX_ALARM,
&data->temp_max_alarm);
read_fan_from_i2c(client, &data->fan_tach,
REG_FAN_TACH_HI, REG_FAN_TACH_LO);
read_fan_from_i2c(client, &data->fan_target,
REG_FAN_TARGET_HI, REG_FAN_TARGET_LO);
read_fan_config_from_i2c(client);
data->last_updated = jiffies;
data->valid = true;
}
mutex_unlock(&data->update_lock);
return data;
}
static ssize_t
show_temp(struct device *dev, struct device_attribute *da, char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
struct emc2103_data *data = emc2103_update_device(dev);
int millidegrees = data->temp[nr].degrees * 1000
+ data->temp[nr].fraction * 125;
return sprintf(buf, "%d\n", millidegrees);
}
static ssize_t
show_temp_min(struct device *dev, struct device_attribute *da, char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
struct emc2103_data *data = emc2103_update_device(dev);
int millidegrees = data->temp_min[nr] * 1000;
return sprintf(buf, "%d\n", millidegrees);
}
static ssize_t
show_temp_max(struct device *dev, struct device_attribute *da, char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
struct emc2103_data *data = emc2103_update_device(dev);
int millidegrees = data->temp_max[nr] * 1000;
return sprintf(buf, "%d\n", millidegrees);
}
static ssize_t
show_temp_fault(struct device *dev, struct device_attribute *da, char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
struct emc2103_data *data = emc2103_update_device(dev);
bool fault = (data->temp[nr].degrees == -128);
return sprintf(buf, "%d\n", fault ? 1 : 0);
}
static ssize_t
show_temp_min_alarm(struct device *dev, struct device_attribute *da, char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
struct emc2103_data *data = emc2103_update_device(dev);
bool alarm = data->temp_min_alarm & (1 << nr);
return sprintf(buf, "%d\n", alarm ? 1 : 0);
}
static ssize_t
show_temp_max_alarm(struct device *dev, struct device_attribute *da, char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
struct emc2103_data *data = emc2103_update_device(dev);
bool alarm = data->temp_max_alarm & (1 << nr);
return sprintf(buf, "%d\n", alarm ? 1 : 0);
}
static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(da)->index;
struct i2c_client *client = to_i2c_client(dev);
struct emc2103_data *data = i2c_get_clientdata(client);
long val;
int result = kstrtol(buf, 10, &val);
if (result < 0)
return -EINVAL;
val = DIV_ROUND_CLOSEST(val, 1000);
if ((val < -63) || (val > 127))
return -EINVAL;
mutex_lock(&data->update_lock);
data->temp_min[nr] = val;
i2c_smbus_write_byte_data(client, REG_TEMP_MIN[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(da)->index;
struct i2c_client *client = to_i2c_client(dev);
struct emc2103_data *data = i2c_get_clientdata(client);
long val;
int result = kstrtol(buf, 10, &val);
if (result < 0)
return -EINVAL;
val = DIV_ROUND_CLOSEST(val, 1000);
if ((val < -63) || (val > 127))
return -EINVAL;
mutex_lock(&data->update_lock);
data->temp_max[nr] = val;
i2c_smbus_write_byte_data(client, REG_TEMP_MAX[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_fan(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
int rpm = 0;
if (data->fan_tach != 0)
rpm = (FAN_RPM_FACTOR * data->fan_multiplier) / data->fan_tach;
return sprintf(buf, "%d\n", rpm);
}
static ssize_t
show_fan_div(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
int fan_div = 8 / data->fan_multiplier;
return sprintf(buf, "%d\n", fan_div);
}
/*
* Note: we also update the fan target here, because its value is
* determined in part by the fan clock divider. This follows the principle
* of least surprise; the user doesn't expect the fan target to change just
* because the divider changed.
*/
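/*
* Illustrative example: at fan1_div = 2 the multiplier is 4; writing
* fan1_div = 4 drops the multiplier to 2, and the code below rescales the
* stored target count by old_div/new_div (e.g. 0x0200 becomes 0x0100), so
* the RPM value reported through fan1_target stays the same.
*/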
static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct emc2103_data *data = emc2103_update_device(dev);
struct i2c_client *client = to_i2c_client(dev);
int new_range_bits, old_div = 8 / data->fan_multiplier;
long new_div;
int status = kstrtol(buf, 10, &new_div);
if (status < 0)
return -EINVAL;
if (new_div == old_div) /* No change */
return count;
switch (new_div) {
case 1:
new_range_bits = 3;
break;
case 2:
new_range_bits = 2;
break;
case 4:
new_range_bits = 1;
break;
case 8:
new_range_bits = 0;
break;
default:
return -EINVAL;
}
mutex_lock(&data->update_lock);
status = i2c_smbus_read_byte_data(client, REG_FAN_CONF1);
if (status < 0) {
dev_dbg(&client->dev, "reg 0x%02x, err %d\n",
REG_FAN_CONF1, status);
mutex_unlock(&data->update_lock);
return -EIO;
}
status &= 0x9F;
status |= (new_range_bits << 5);
i2c_smbus_write_byte_data(client, REG_FAN_CONF1, status);
data->fan_multiplier = 8 / new_div;
/* update fan target if high byte is not disabled */
if ((data->fan_target & 0x1fe0) != 0x1fe0) {
u16 new_target = (data->fan_target * old_div) / new_div;
data->fan_target = min(new_target, (u16)0x1fff);
write_fan_target_to_i2c(client, data->fan_target);
}
/* invalidate data to force re-read from hardware */
data->valid = false;
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_fan_target(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
int rpm = 0;
/* high byte of 0xff indicates disabled so return 0 */
if ((data->fan_target != 0) && ((data->fan_target & 0x1fe0) != 0x1fe0))
rpm = (FAN_RPM_FACTOR * data->fan_multiplier)
/ data->fan_target;
return sprintf(buf, "%d\n", rpm);
}
static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct emc2103_data *data = emc2103_update_device(dev);
struct i2c_client *client = to_i2c_client(dev);
long rpm_target;
int result = kstrtol(buf, 10, &rpm_target);
if (result < 0)
return -EINVAL;
/* Datasheet states 16384 as maximum RPM target (table 3.2) */
if ((rpm_target < 0) || (rpm_target > 16384))
return -EINVAL;
mutex_lock(&data->update_lock);
if (rpm_target == 0)
data->fan_target = 0x1fff;
else
data->fan_target = SENSORS_LIMIT(
(FAN_RPM_FACTOR * data->fan_multiplier) / rpm_target,
0, 0x1fff);
write_fan_target_to_i2c(client, data->fan_target);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_fan_fault(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
bool fault = ((data->fan_tach & 0x1fe0) == 0x1fe0);
return sprintf(buf, "%d\n", fault ? 1 : 0);
}
static ssize_t
show_pwm_enable(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
return sprintf(buf, "%d\n", data->fan_rpm_control ? 3 : 0);
}
static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct emc2103_data *data = i2c_get_clientdata(client);
long new_value;
u8 conf_reg;
int result = kstrtol(buf, 10, &new_value);
if (result < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
switch (new_value) {
case 0:
data->fan_rpm_control = false;
break;
case 3:
data->fan_rpm_control = true;
break;
default:
mutex_unlock(&data->update_lock);
return -EINVAL;
}
read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
if (data->fan_rpm_control)
conf_reg |= 0x80;
else
conf_reg &= ~0x80;
i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg);
mutex_unlock(&data->update_lock);
return count;
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, show_temp_min,
set_temp_min, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max,
set_temp_max, 0);
static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_temp_min_alarm,
NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_temp_max_alarm,
NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, show_temp_min,
set_temp_min, 1);
static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
set_temp_max, 1);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_temp_min_alarm,
NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_temp_max_alarm,
NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR, show_temp_min,
set_temp_min, 2);
static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
set_temp_max, 2);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_temp_min_alarm,
NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_temp_max_alarm,
NULL, 2);
static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO | S_IWUSR, show_temp_min,
set_temp_min, 3);
static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max,
set_temp_max, 3);
static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3);
static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_temp_min_alarm,
NULL, 3);
static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_temp_max_alarm,
NULL, 3);
static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
static DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR, show_fan_div, set_fan_div);
static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_fan_target,
set_fan_target);
static DEVICE_ATTR(fan1_fault, S_IRUGO, show_fan_fault, NULL);
static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
set_pwm_enable);
/* sensors present on all models */
static struct attribute *emc2103_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_fault.dev_attr.attr,
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
&dev_attr_fan1_input.attr,
&dev_attr_fan1_div.attr,
&dev_attr_fan1_target.attr,
&dev_attr_fan1_fault.attr,
&dev_attr_pwm1_enable.attr,
NULL
};
/* extra temperature sensors only present on 2103-2 and 2103-4 */
static struct attribute *emc2103_attributes_temp3[] = {
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp3_min.dev_attr.attr,
&sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp3_fault.dev_attr.attr,
&sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
NULL
};
/* extra temperature sensors only present on 2103-2 and 2103-4 in APD mode */
static struct attribute *emc2103_attributes_temp4[] = {
&sensor_dev_attr_temp4_input.dev_attr.attr,
&sensor_dev_attr_temp4_min.dev_attr.attr,
&sensor_dev_attr_temp4_max.dev_attr.attr,
&sensor_dev_attr_temp4_fault.dev_attr.attr,
&sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
NULL
};
static const struct attribute_group emc2103_group = {
.attrs = emc2103_attributes,
};
static const struct attribute_group emc2103_temp3_group = {
.attrs = emc2103_attributes_temp3,
};
static const struct attribute_group emc2103_temp4_group = {
.attrs = emc2103_attributes_temp4,
};
static int
emc2103_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct emc2103_data *data;
int status;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
data = kzalloc(sizeof(struct emc2103_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
/* 2103-2 and 2103-4 have 3 external diodes, 2103-1 has 1 */
status = i2c_smbus_read_byte_data(client, REG_PRODUCT_ID);
if (status == 0x24) {
/* 2103-1 only has 1 external diode */
data->temp_count = 2;
} else {
/* 2103-2 and 2103-4 have 3 or 4 external diodes */
status = i2c_smbus_read_byte_data(client, REG_CONF1);
if (status < 0) {
dev_dbg(&client->dev, "reg 0x%02x, err %d\n", REG_CONF1,
status);
goto exit_free;
}
/* detect current state of hardware */
data->temp_count = (status & 0x01) ? 4 : 3;
/* force APD state if module parameter is set */
if (apd == 0) {
/* force APD mode off */
data->temp_count = 3;
status &= ~(0x01);
i2c_smbus_write_byte_data(client, REG_CONF1, status);
} else if (apd == 1) {
/* force APD mode on */
data->temp_count = 4;
status |= 0x01;
i2c_smbus_write_byte_data(client, REG_CONF1, status);
}
}
/* Register sysfs hooks */
status = sysfs_create_group(&client->dev.kobj, &emc2103_group);
if (status)
goto exit_free;
if (data->temp_count >= 3) {
status = sysfs_create_group(&client->dev.kobj,
&emc2103_temp3_group);
if (status)
goto exit_remove;
}
if (data->temp_count == 4) {
status = sysfs_create_group(&client->dev.kobj,
&emc2103_temp4_group);
if (status)
goto exit_remove_temp3;
}
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
status = PTR_ERR(data->hwmon_dev);
goto exit_remove_temp4;
}
dev_info(&client->dev, "%s: sensor '%s'\n",
dev_name(data->hwmon_dev), client->name);
return 0;
exit_remove_temp4:
if (data->temp_count == 4)
sysfs_remove_group(&client->dev.kobj, &emc2103_temp4_group);
exit_remove_temp3:
if (data->temp_count >= 3)
sysfs_remove_group(&client->dev.kobj, &emc2103_temp3_group);
exit_remove:
sysfs_remove_group(&client->dev.kobj, &emc2103_group);
exit_free:
kfree(data);
return status;
}
static int emc2103_remove(struct i2c_client *client)
{
struct emc2103_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
if (data->temp_count == 4)
sysfs_remove_group(&client->dev.kobj, &emc2103_temp4_group);
if (data->temp_count >= 3)
sysfs_remove_group(&client->dev.kobj, &emc2103_temp3_group);
sysfs_remove_group(&client->dev.kobj, &emc2103_group);
kfree(data);
return 0;
}
static const struct i2c_device_id emc2103_ids[] = {
{ "emc2103", 0, },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, emc2103_ids);
/* Return 0 if detection is successful, -ENODEV otherwise */
static int
emc2103_detect(struct i2c_client *new_client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
int manufacturer, product;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
manufacturer = i2c_smbus_read_byte_data(new_client, REG_MFG_ID);
if (manufacturer != 0x5D)
return -ENODEV;
product = i2c_smbus_read_byte_data(new_client, REG_PRODUCT_ID);
if ((product != 0x24) && (product != 0x26))
return -ENODEV;
strlcpy(info->type, "emc2103", I2C_NAME_SIZE);
return 0;
}
static struct i2c_driver emc2103_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "emc2103",
},
.probe = emc2103_probe,
.remove = emc2103_remove,
.id_table = emc2103_ids,
.detect = emc2103_detect,
.address_list = normal_i2c,
};
module_i2c_driver(emc2103_driver);
MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
MODULE_DESCRIPTION("SMSC EMC2103 hwmon driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
kashifmin/KashKernel_MT6589 | drivers/ata/pata_ixp4xx_cf.c | 5072 | 5685 | /*
* ixp4xx PATA/Compact Flash driver
* Copyright (C) 2006-07 Tower Technologies
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* An ATA driver to handle a Compact Flash connected
* to the ixp4xx expansion bus in TrueIDE mode. The CF
* must have its chip selects connected to two CS lines
* on the ixp4xx. If the irq is not available, you might
* want to modify both this driver and libata to run in
* polling mode.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <scsi/scsi_host.h>
#define DRV_NAME "pata_ixp4xx_cf"
#define DRV_VERSION "0.2"
static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
{
struct ata_device *dev;
ata_for_each_dev(dev, link, ENABLED) {
ata_dev_info(dev, "configured for PIO0\n");
dev->pio_mode = XFER_PIO_0;
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
}
return 0;
}
static unsigned int ixp4xx_mmio_data_xfer(struct ata_device *dev,
unsigned char *buf, unsigned int buflen, int rw)
{
unsigned int i;
unsigned int words = buflen >> 1;
u16 *buf16 = (u16 *) buf;
struct ata_port *ap = dev->link->ap;
void __iomem *mmio = ap->ioaddr.data_addr;
struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
/* set the expansion bus in 16bit mode and restore
* 8 bit mode after the transaction.
*/
*data->cs0_cfg &= ~(0x01);
udelay(100);
/* Transfer multiple of 2 bytes */
if (rw == READ)
for (i = 0; i < words; i++)
buf16[i] = readw(mmio);
else
for (i = 0; i < words; i++)
writew(buf16[i], mmio);
/* Transfer trailing 1 byte, if any. */
if (unlikely(buflen & 0x01)) {
u16 align_buf[1] = { 0 };
unsigned char *trailing_buf = buf + buflen - 1;
if (rw == READ) {
align_buf[0] = readw(mmio);
memcpy(trailing_buf, align_buf, 1);
} else {
memcpy(align_buf, trailing_buf, 1);
writew(align_buf[0], mmio);
}
words++;
}
udelay(100);
*data->cs0_cfg |= 0x01;
return words << 1;
}
static struct scsi_host_template ixp4xx_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations ixp4xx_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = ixp4xx_mmio_data_xfer,
.cable_detect = ata_cable_40wire,
.set_mode = ixp4xx_set_mode,
};
static void ixp4xx_setup_port(struct ata_port *ap,
struct ixp4xx_pata_data *data,
unsigned long raw_cs0, unsigned long raw_cs1)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
unsigned long raw_cmd = raw_cs0;
unsigned long raw_ctl = raw_cs1 + 0x06;
ioaddr->cmd_addr = data->cs0;
ioaddr->altstatus_addr = data->cs1 + 0x06;
ioaddr->ctl_addr = data->cs1 + 0x06;
ata_sff_std_ports(ioaddr);
#ifndef __ARMEB__
/* adjust the addresses to handle the address swizzling of the
* ixp4xx in little endian mode.
*/
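/* Illustrative example: a byte-wide task-file register at byte offset 0x7
* of CS0 is accessed at 0x7 ^ 0x3 = 0x4 in little-endian mode, while the
* 16-bit data register at offset 0x0 moves to 0x0 ^ 0x2 = 0x2.
*/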
*(unsigned long *)&ioaddr->data_addr ^= 0x02;
*(unsigned long *)&ioaddr->cmd_addr ^= 0x03;
*(unsigned long *)&ioaddr->altstatus_addr ^= 0x03;
*(unsigned long *)&ioaddr->ctl_addr ^= 0x03;
*(unsigned long *)&ioaddr->error_addr ^= 0x03;
*(unsigned long *)&ioaddr->feature_addr ^= 0x03;
*(unsigned long *)&ioaddr->nsect_addr ^= 0x03;
*(unsigned long *)&ioaddr->lbal_addr ^= 0x03;
*(unsigned long *)&ioaddr->lbam_addr ^= 0x03;
*(unsigned long *)&ioaddr->lbah_addr ^= 0x03;
*(unsigned long *)&ioaddr->device_addr ^= 0x03;
*(unsigned long *)&ioaddr->status_addr ^= 0x03;
*(unsigned long *)&ioaddr->command_addr ^= 0x03;
raw_cmd ^= 0x03;
raw_ctl ^= 0x03;
#endif
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", raw_cmd, raw_ctl);
}
static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
{
unsigned int irq;
struct resource *cs0, *cs1;
struct ata_host *host;
struct ata_port *ap;
struct ixp4xx_pata_data *data = pdev->dev.platform_data;
cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!cs0 || !cs1)
return -EINVAL;
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host)
return -ENOMEM;
/* acquire resources and fill host */
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
if (!data->cs0 || !data->cs1)
return -ENOMEM;
irq = platform_get_irq(pdev, 0);
if (irq)
irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
/* Setup expansion bus chip selects */
*data->cs0_cfg = data->cs0_bits;
*data->cs1_cfg = data->cs1_bits;
ap = host->ports[0];
ap->ops = &ixp4xx_port_ops;
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_NO_ATAPI;
ixp4xx_setup_port(ap, data, cs0->start, cs1->start);
ata_print_version_once(&pdev->dev, DRV_VERSION);
/* activate host */
return ata_host_activate(host, irq, ata_sff_interrupt, 0, &ixp4xx_sht);
}
static __devexit int ixp4xx_pata_remove(struct platform_device *dev)
{
struct ata_host *host = platform_get_drvdata(dev);
ata_host_detach(host);
return 0;
}
static struct platform_driver ixp4xx_pata_platform_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.probe = ixp4xx_pata_probe,
.remove = __devexit_p(ixp4xx_pata_remove),
};
module_platform_driver(ixp4xx_pata_platform_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("low-level driver for ixp4xx Compact Flash PATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
| gpl-2.0 |
RepoBackups/Canuck | drivers/mtd/ar7part.c | 5072 | 4195 | /*
* Copyright © 2007 Eugene Konev <ejka@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* TI AR7 flash partition table.
* Based on ar7 map by Felix Fietkau <nbd@openwrt.org>
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/bootmem.h>
#include <linux/magic.h>
#include <linux/module.h>
#define AR7_PARTS 4
#define ROOT_OFFSET 0xe0000
#define LOADER_MAGIC1 le32_to_cpu(0xfeedfa42)
#define LOADER_MAGIC2 le32_to_cpu(0xfeed1281)
#ifndef SQUASHFS_MAGIC
#define SQUASHFS_MAGIC 0x73717368
#endif
struct ar7_bin_rec {
unsigned int checksum;
unsigned int length;
unsigned int address;
};
static int create_mtd_partitions(struct mtd_info *master,
struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct ar7_bin_rec header;
unsigned int offset;
size_t len;
unsigned int pre_size = master->erasesize, post_size = 0;
unsigned int root_offset = ROOT_OFFSET;
int retries = 10;
struct mtd_partition *ar7_parts;
ar7_parts = kzalloc(sizeof(*ar7_parts) * AR7_PARTS, GFP_KERNEL);
if (!ar7_parts)
return -ENOMEM;
ar7_parts[0].name = "loader";
ar7_parts[0].offset = 0;
ar7_parts[0].size = master->erasesize;
ar7_parts[0].mask_flags = MTD_WRITEABLE;
ar7_parts[1].name = "config";
ar7_parts[1].offset = 0;
ar7_parts[1].size = master->erasesize;
ar7_parts[1].mask_flags = 0;
do { /* Try 10 blocks starting from master->erasesize */
offset = pre_size;
mtd_read(master, offset, sizeof(header), &len,
(uint8_t *)&header);
if (!strncmp((char *)&header, "TIENV0.8", 8))
ar7_parts[1].offset = pre_size;
if (header.checksum == LOADER_MAGIC1)
break;
if (header.checksum == LOADER_MAGIC2)
break;
pre_size += master->erasesize;
} while (retries--);
pre_size = offset;
if (!ar7_parts[1].offset) {
ar7_parts[1].offset = master->size - master->erasesize;
post_size = master->erasesize;
}
switch (header.checksum) {
case LOADER_MAGIC1:
while (header.length) {
offset += sizeof(header) + header.length;
mtd_read(master, offset, sizeof(header), &len,
(uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4;
break;
case LOADER_MAGIC2:
while (header.length) {
offset += sizeof(header) + header.length;
mtd_read(master, offset, sizeof(header), &len,
(uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4 + 0xff;
root_offset &= ~(uint32_t)0xff;
break;
default:
printk(KERN_WARNING "Unknown magic: %08x\n", header.checksum);
break;
}
mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
if (header.checksum != SQUASHFS_MAGIC) {
root_offset += master->erasesize - 1;
root_offset &= ~(master->erasesize - 1);
}
ar7_parts[2].name = "linux";
ar7_parts[2].offset = pre_size;
ar7_parts[2].size = master->size - pre_size - post_size;
ar7_parts[2].mask_flags = 0;
ar7_parts[3].name = "rootfs";
ar7_parts[3].offset = root_offset;
ar7_parts[3].size = master->size - root_offset - post_size;
ar7_parts[3].mask_flags = 0;
*pparts = ar7_parts;
return AR7_PARTS;
}
static struct mtd_part_parser ar7_parser = {
.owner = THIS_MODULE,
.parse_fn = create_mtd_partitions,
.name = "ar7part",
};
static int __init ar7_parser_init(void)
{
return register_mtd_parser(&ar7_parser);
}
module_init(ar7_parser_init);
MODULE_LICENSE("GPL");
MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
"Eugene Konev <ejka@openwrt.org>");
MODULE_DESCRIPTION("MTD partitioning for TI AR7");
| gpl-2.0 |
Quaesar/android_kernel_hp_phobos | drivers/infiniband/hw/mthca/mthca_provider.c | 5328 | 35936 | /*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/export.h>
#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_user.h"
#include "mthca_memfree.h"
static void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
mad->class_version = 1;
mad->method = IB_MGMT_METHOD_GET;
}
static int mthca_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
struct mthca_dev *mdev = to_mdev(ibdev);
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
memset(props, 0, sizeof *props);
props->fw_ver = mdev->fw_ver;
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mthca_MAD_IFC(mdev, 1, 1,
1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
props->device_cap_flags = mdev->device_cap_flags;
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
0xffffff;
props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
props->max_mr_size = ~0ull;
props->page_size_cap = mdev->limits.page_size_cap;
props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
props->max_qp_wr = mdev->limits.max_wqes;
props->max_sge = mdev->limits.max_sg;
props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
props->max_cqe = mdev->limits.max_cqes;
props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
props->max_pd = mdev->limits.num_pds - mdev->limits.reserved_pds;
props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift;
props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
props->max_srq_wr = mdev->limits.max_srq_wqes;
props->max_srq_sge = mdev->limits.max_srq_sge;
props->local_ca_ack_delay = mdev->limits.local_ca_ack_delay;
props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
IB_ATOMIC_HCA : IB_ATOMIC_NONE;
props->max_pkeys = mdev->limits.pkey_table_len;
props->max_mcast_grp = mdev->limits.num_mgms + mdev->limits.num_amgms;
props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
/*
* If Sinai memory key optimization is being used, then only
* the 8-bit key portion will change. For other HCAs, the
* unused index bits will also be used for FMR remapping.
*/
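/*
* Illustrative arithmetic (example limit, not a fixed configuration): with
* 2^17 MPTs, ilog2(num_mpts) = 17, so the non-Sinai case gives
* max_map_per_fmr = (1 << (32 - 17)) - 1 = 32767 remaps per FMR.
*/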
if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
props->max_map_per_fmr = 255;
else
props->max_map_per_fmr =
(1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;
err = 0;
out:
kfree(in_mad);
kfree(out_mad);
return err;
}
static int mthca_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
memset(props, 0, sizeof *props);
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
props->lmc = out_mad->data[34] & 0x7;
props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
props->sm_sl = out_mad->data[36] & 0xf;
props->state = out_mad->data[32] & 0xf;
props->phys_state = out_mad->data[33] >> 4;
props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len;
props->max_msg_sz = 0x80000000;
props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;
props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
props->active_width = out_mad->data[31] & 0xf;
props->active_speed = out_mad->data[35] >> 4;
props->max_mtu = out_mad->data[41] & 0xf;
props->active_mtu = out_mad->data[36] >> 4;
props->subnet_timeout = out_mad->data[51] & 0x1f;
props->max_vl_num = out_mad->data[37] >> 4;
props->init_type_reply = out_mad->data[41] >> 4;
out:
kfree(in_mad);
kfree(out_mad);
return err;
}
static int mthca_modify_device(struct ib_device *ibdev,
int mask,
struct ib_device_modify *props)
{
if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
return -EOPNOTSUPP;
if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
return -ERESTARTSYS;
memcpy(ibdev->node_desc, props->node_desc, 64);
mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
}
return 0;
}
static int mthca_modify_port(struct ib_device *ibdev,
u8 port, int port_modify_mask,
struct ib_port_modify *props)
{
struct mthca_set_ib_param set_ib;
struct ib_port_attr attr;
int err;
if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
return -ERESTARTSYS;
err = mthca_query_port(ibdev, port, &attr);
if (err)
goto out;
set_ib.set_si_guid = 0;
set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);
set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
~props->clr_port_cap_mask;
err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
if (err)
goto out;
out:
mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
return err;
}
static int mthca_query_pkey(struct ib_device *ibdev,
u8 port, u16 index, u16 *pkey)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
out:
kfree(in_mad);
kfree(out_mad);
return err;
}
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *gid)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
memcpy(gid->raw, out_mad->data + 8, 8);
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
out:
kfree(in_mad);
kfree(out_mad);
return err;
}
static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct mthca_alloc_ucontext_resp uresp;
struct mthca_ucontext *context;
int err;
if (!(to_mdev(ibdev)->active))
return ERR_PTR(-EAGAIN);
memset(&uresp, 0, sizeof uresp);
uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
if (mthca_is_memfree(to_mdev(ibdev)))
uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
else
uresp.uarc_size = 0;
context = kmalloc(sizeof *context, GFP_KERNEL);
if (!context)
return ERR_PTR(-ENOMEM);
err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
if (err) {
kfree(context);
return ERR_PTR(err);
}
context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
if (IS_ERR(context->db_tab)) {
err = PTR_ERR(context->db_tab);
mthca_uar_free(to_mdev(ibdev), &context->uar);
kfree(context);
return ERR_PTR(err);
}
if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
mthca_uar_free(to_mdev(ibdev), &context->uar);
kfree(context);
return ERR_PTR(-EFAULT);
}
context->reg_mr_warned = 0;
return &context->ibucontext;
}
static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
to_mucontext(context)->db_tab);
mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
kfree(to_mucontext(context));
return 0;
}
static int mthca_mmap_uar(struct ib_ucontext *context,
struct vm_area_struct *vma)
{
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start,
to_mucontext(context)->uar.pfn,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct mthca_pd *pd;
int err;
pd = kmalloc(sizeof *pd, GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
if (err) {
kfree(pd);
return ERR_PTR(err);
}
if (context) {
if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
mthca_pd_free(to_mdev(ibdev), pd);
kfree(pd);
return ERR_PTR(-EFAULT);
}
}
return &pd->ibpd;
}
static int mthca_dealloc_pd(struct ib_pd *pd)
{
mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
kfree(pd);
return 0;
}
static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
struct ib_ah_attr *ah_attr)
{
int err;
struct mthca_ah *ah;
ah = kmalloc(sizeof *ah, GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
if (err) {
kfree(ah);
return ERR_PTR(err);
}
return &ah->ibah;
}
static int mthca_ah_destroy(struct ib_ah *ah)
{
mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
kfree(ah);
return 0;
}
static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
struct ib_srq_init_attr *init_attr,
struct ib_udata *udata)
{
struct mthca_create_srq ucmd;
struct mthca_ucontext *context = NULL;
struct mthca_srq *srq;
int err;
if (init_attr->srq_type != IB_SRQT_BASIC)
return ERR_PTR(-ENOSYS);
srq = kmalloc(sizeof *srq, GFP_KERNEL);
if (!srq)
return ERR_PTR(-ENOMEM);
if (pd->uobject) {
context = to_mucontext(pd->uobject->context);
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
err = -EFAULT;
goto err_free;
}
err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
context->db_tab, ucmd.db_index,
ucmd.db_page);
if (err)
goto err_free;
srq->mr.ibmr.lkey = ucmd.lkey;
srq->db_index = ucmd.db_index;
}
err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
&init_attr->attr, srq);
if (err && pd->uobject)
mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
context->db_tab, ucmd.db_index);
if (err)
goto err_free;
if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
mthca_free_srq(to_mdev(pd->device), srq);
err = -EFAULT;
goto err_free;
}
return &srq->ibsrq;
err_free:
kfree(srq);
return ERR_PTR(err);
}
static int mthca_destroy_srq(struct ib_srq *srq)
{
struct mthca_ucontext *context;
if (srq->uobject) {
context = to_mucontext(srq->uobject->context);
mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
context->db_tab, to_msrq(srq)->db_index);
}
mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
kfree(srq);
return 0;
}
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
struct mthca_create_qp ucmd;
struct mthca_qp *qp;
int err;
if (init_attr->create_flags)
return ERR_PTR(-EINVAL);
switch (init_attr->qp_type) {
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
{
struct mthca_ucontext *context;
qp = kmalloc(sizeof *qp, GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
if (pd->uobject) {
context = to_mucontext(pd->uobject->context);
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
kfree(qp);
return ERR_PTR(-EFAULT);
}
err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
context->db_tab,
ucmd.sq_db_index, ucmd.sq_db_page);
if (err) {
kfree(qp);
return ERR_PTR(err);
}
err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
context->db_tab,
ucmd.rq_db_index, ucmd.rq_db_page);
if (err) {
mthca_unmap_user_db(to_mdev(pd->device),
&context->uar,
context->db_tab,
ucmd.sq_db_index);
kfree(qp);
return ERR_PTR(err);
}
qp->mr.ibmr.lkey = ucmd.lkey;
qp->sq.db_index = ucmd.sq_db_index;
qp->rq.db_index = ucmd.rq_db_index;
}
err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
to_mcq(init_attr->send_cq),
to_mcq(init_attr->recv_cq),
init_attr->qp_type, init_attr->sq_sig_type,
&init_attr->cap, qp);
if (err && pd->uobject) {
context = to_mucontext(pd->uobject->context);
mthca_unmap_user_db(to_mdev(pd->device),
&context->uar,
context->db_tab,
ucmd.sq_db_index);
mthca_unmap_user_db(to_mdev(pd->device),
&context->uar,
context->db_tab,
ucmd.rq_db_index);
}
qp->ibqp.qp_num = qp->qpn;
break;
}
case IB_QPT_SMI:
case IB_QPT_GSI:
{
/* Don't allow userspace to create special QPs */
if (pd->uobject)
return ERR_PTR(-EINVAL);
qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
to_mcq(init_attr->send_cq),
to_mcq(init_attr->recv_cq),
init_attr->sq_sig_type, &init_attr->cap,
qp->ibqp.qp_num, init_attr->port_num,
to_msqp(qp));
break;
}
default:
/* Don't support raw QPs */
return ERR_PTR(-ENOSYS);
}
if (err) {
kfree(qp);
return ERR_PTR(err);
}
init_attr->cap.max_send_wr = qp->sq.max;
init_attr->cap.max_recv_wr = qp->rq.max;
init_attr->cap.max_send_sge = qp->sq.max_gs;
init_attr->cap.max_recv_sge = qp->rq.max_gs;
init_attr->cap.max_inline_data = qp->max_inline_data;
return &qp->ibqp;
}
static int mthca_destroy_qp(struct ib_qp *qp)
{
if (qp->uobject) {
mthca_unmap_user_db(to_mdev(qp->device),
&to_mucontext(qp->uobject->context)->uar,
to_mucontext(qp->uobject->context)->db_tab,
to_mqp(qp)->sq.db_index);
mthca_unmap_user_db(to_mdev(qp->device),
&to_mucontext(qp->uobject->context)->uar,
to_mucontext(qp->uobject->context)->db_tab,
to_mqp(qp)->rq.db_index);
}
mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
kfree(qp);
return 0;
}
static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
int comp_vector,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct mthca_create_cq ucmd;
struct mthca_cq *cq;
int nent;
int err;
if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
return ERR_PTR(-EINVAL);
if (context) {
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
return ERR_PTR(-EFAULT);
err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
to_mucontext(context)->db_tab,
ucmd.set_db_index, ucmd.set_db_page);
if (err)
return ERR_PTR(err);
err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
to_mucontext(context)->db_tab,
ucmd.arm_db_index, ucmd.arm_db_page);
if (err)
goto err_unmap_set;
}
cq = kmalloc(sizeof *cq, GFP_KERNEL);
if (!cq) {
err = -ENOMEM;
goto err_unmap_arm;
}
if (context) {
cq->buf.mr.ibmr.lkey = ucmd.lkey;
cq->set_ci_db_index = ucmd.set_db_index;
cq->arm_db_index = ucmd.arm_db_index;
}
for (nent = 1; nent <= entries; nent <<= 1)
; /* nothing */
err = mthca_init_cq(to_mdev(ibdev), nent,
context ? to_mucontext(context) : NULL,
context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
cq);
if (err)
goto err_free;
if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
mthca_free_cq(to_mdev(ibdev), cq);
goto err_free;
}
cq->resize_buf = NULL;
return &cq->ibcq;
err_free:
kfree(cq);
err_unmap_arm:
if (context)
mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
to_mucontext(context)->db_tab, ucmd.arm_db_index);
err_unmap_set:
if (context)
mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
to_mucontext(context)->db_tab, ucmd.set_db_index);
return ERR_PTR(err);
}
static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
int entries)
{
int ret;
spin_lock_irq(&cq->lock);
if (cq->resize_buf) {
ret = -EBUSY;
goto unlock;
}
cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
if (!cq->resize_buf) {
ret = -ENOMEM;
goto unlock;
}
cq->resize_buf->state = CQ_RESIZE_ALLOC;
ret = 0;
unlock:
spin_unlock_irq(&cq->lock);
if (ret)
return ret;
ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
if (ret) {
spin_lock_irq(&cq->lock);
kfree(cq->resize_buf);
cq->resize_buf = NULL;
spin_unlock_irq(&cq->lock);
return ret;
}
cq->resize_buf->cqe = entries - 1;
spin_lock_irq(&cq->lock);
cq->resize_buf->state = CQ_RESIZE_READY;
spin_unlock_irq(&cq->lock);
return 0;
}
static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(ibcq->device);
struct mthca_cq *cq = to_mcq(ibcq);
struct mthca_resize_cq ucmd;
u32 lkey;
int ret;
if (entries < 1 || entries > dev->limits.max_cqes)
return -EINVAL;
mutex_lock(&cq->mutex);
entries = roundup_pow_of_two(entries + 1);
if (entries == ibcq->cqe + 1) {
ret = 0;
goto out;
}
if (cq->is_kernel) {
ret = mthca_alloc_resize_buf(dev, cq, entries);
if (ret)
goto out;
lkey = cq->resize_buf->buf.mr.ibmr.lkey;
} else {
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
ret = -EFAULT;
goto out;
}
lkey = ucmd.lkey;
}
ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));
if (ret) {
if (cq->resize_buf) {
mthca_free_cq_buf(dev, &cq->resize_buf->buf,
cq->resize_buf->cqe);
kfree(cq->resize_buf);
spin_lock_irq(&cq->lock);
cq->resize_buf = NULL;
spin_unlock_irq(&cq->lock);
}
goto out;
}
if (cq->is_kernel) {
struct mthca_cq_buf tbuf;
int tcqe;
spin_lock_irq(&cq->lock);
if (cq->resize_buf->state == CQ_RESIZE_READY) {
mthca_cq_resize_copy_cqes(cq);
tbuf = cq->buf;
tcqe = cq->ibcq.cqe;
cq->buf = cq->resize_buf->buf;
cq->ibcq.cqe = cq->resize_buf->cqe;
} else {
tbuf = cq->resize_buf->buf;
tcqe = cq->resize_buf->cqe;
}
kfree(cq->resize_buf);
cq->resize_buf = NULL;
spin_unlock_irq(&cq->lock);
mthca_free_cq_buf(dev, &tbuf, tcqe);
} else
ibcq->cqe = entries - 1;
out:
mutex_unlock(&cq->mutex);
return ret;
}
static int mthca_destroy_cq(struct ib_cq *cq)
{
if (cq->uobject) {
mthca_unmap_user_db(to_mdev(cq->device),
&to_mucontext(cq->uobject->context)->uar,
to_mucontext(cq->uobject->context)->db_tab,
to_mcq(cq)->arm_db_index);
mthca_unmap_user_db(to_mdev(cq->device),
&to_mucontext(cq->uobject->context)->uar,
to_mucontext(cq->uobject->context)->db_tab,
to_mcq(cq)->set_ci_db_index);
}
mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
kfree(cq);
return 0;
}
static inline u32 convert_access(int acc)
{
return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC : 0) |
(acc & IB_ACCESS_REMOTE_WRITE ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
(acc & IB_ACCESS_REMOTE_READ ? MTHCA_MPT_FLAG_REMOTE_READ : 0) |
(acc & IB_ACCESS_LOCAL_WRITE ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0) |
MTHCA_MPT_FLAG_LOCAL_READ;
}
static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mthca_mr *mr;
int err;
mr = kmalloc(sizeof *mr, GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
err = mthca_mr_alloc_notrans(to_mdev(pd->device),
to_mpd(pd)->pd_num,
convert_access(acc), mr);
if (err) {
kfree(mr);
return ERR_PTR(err);
}
mr->umem = NULL;
return &mr->ibmr;
}
static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
struct ib_phys_buf *buffer_list,
int num_phys_buf,
int acc,
u64 *iova_start)
{
struct mthca_mr *mr;
u64 *page_list;
u64 total_size;
unsigned long mask;
int shift;
int npages;
int err;
int i, j, n;
mask = buffer_list[0].addr ^ *iova_start;
total_size = 0;
for (i = 0; i < num_phys_buf; ++i) {
if (i != 0)
mask |= buffer_list[i].addr;
if (i != num_phys_buf - 1)
mask |= buffer_list[i].addr + buffer_list[i].size;
total_size += buffer_list[i].size;
}
if (mask & ~PAGE_MASK)
return ERR_PTR(-EINVAL);
shift = __ffs(mask | 1 << 31);
buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
buffer_list[0].addr &= ~0ull << shift;
mr = kmalloc(sizeof *mr, GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
npages = 0;
for (i = 0; i < num_phys_buf; ++i)
npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
if (!npages)
return &mr->ibmr;
page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
if (!page_list) {
kfree(mr);
return ERR_PTR(-ENOMEM);
}
n = 0;
for (i = 0; i < num_phys_buf; ++i)
for (j = 0;
j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
++j)
page_list[n++] = buffer_list[i].addr + ((u64) j << shift);
mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
"in PD %x; shift %d, npages %d.\n",
(unsigned long long) buffer_list[0].addr,
(unsigned long long) *iova_start,
to_mpd(pd)->pd_num,
shift, npages);
err = mthca_mr_alloc_phys(to_mdev(pd->device),
to_mpd(pd)->pd_num,
page_list, shift, npages,
*iova_start, total_size,
convert_access(acc), mr);
if (err) {
kfree(page_list);
kfree(mr);
return ERR_PTR(err);
}
kfree(page_list);
mr->umem = NULL;
return &mr->ibmr;
}
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(pd->device);
struct ib_umem_chunk *chunk;
struct mthca_mr *mr;
struct mthca_reg_mr ucmd;
u64 *pages;
int shift, n, len;
int i, j, k;
int err = 0;
int write_mtt_size;
if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) {
if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
current->comm);
mthca_warn(dev, " Update libmthca to fix this.\n");
}
++to_mucontext(pd->uobject->context)->reg_mr_warned;
ucmd.mr_attrs = 0;
} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
return ERR_PTR(-EFAULT);
mr = kmalloc(sizeof *mr, GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
ucmd.mr_attrs & MTHCA_MR_DMASYNC);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err;
}
shift = ffs(mr->umem->page_size) - 1;
n = 0;
list_for_each_entry(chunk, &mr->umem->chunk_list, list)
n += chunk->nents;
mr->mtt = mthca_alloc_mtt(dev, n);
if (IS_ERR(mr->mtt)) {
err = PTR_ERR(mr->mtt);
goto err_umem;
}
pages = (u64 *) __get_free_page(GFP_KERNEL);
if (!pages) {
err = -ENOMEM;
goto err_mtt;
}
i = n = 0;
write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
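	/*
	 * Walk the umem chunks, collecting per-page DMA addresses into the
	 * scratch page and flushing them to the MTT once write_mtt_size
	 * entries have accumulated.
	 */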
list_for_each_entry(chunk, &mr->umem->chunk_list, list)
for (j = 0; j < chunk->nmap; ++j) {
len = sg_dma_len(&chunk->page_list[j]) >> shift;
for (k = 0; k < len; ++k) {
pages[i++] = sg_dma_address(&chunk->page_list[j]) +
mr->umem->page_size * k;
/*
* Be friendly to write_mtt and pass it chunks
* of appropriate size.
*/
if (i == write_mtt_size) {
err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
if (err)
goto mtt_done;
n += i;
i = 0;
}
}
}
if (i)
err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
free_page((unsigned long) pages);
if (err)
goto err_mtt;
err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
convert_access(acc), mr);
if (err)
goto err_mtt;
return &mr->ibmr;
err_mtt:
mthca_free_mtt(dev, mr->mtt);
err_umem:
ib_umem_release(mr->umem);
err:
kfree(mr);
return ERR_PTR(err);
}
static int mthca_dereg_mr(struct ib_mr *mr)
{
struct mthca_mr *mmr = to_mmr(mr);
mthca_free_mr(to_mdev(mr->device), mmr);
if (mmr->umem)
ib_umem_release(mmr->umem);
kfree(mmr);
return 0;
}
static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr)
{
struct mthca_fmr *fmr;
int err;
fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
if (!fmr)
return ERR_PTR(-ENOMEM);
memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
convert_access(mr_access_flags), fmr);
if (err) {
kfree(fmr);
return ERR_PTR(err);
}
return &fmr->ibmr;
}
static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
struct mthca_fmr *mfmr = to_mfmr(fmr);
int err;
err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
if (err)
return err;
kfree(mfmr);
return 0;
}
static int mthca_unmap_fmr(struct list_head *fmr_list)
{
struct ib_fmr *fmr;
int err;
struct mthca_dev *mdev = NULL;
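	/* All FMRs in the list must belong to the same device. */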
list_for_each_entry(fmr, fmr_list, list) {
if (mdev && to_mdev(fmr->device) != mdev)
return -EINVAL;
mdev = to_mdev(fmr->device);
}
if (!mdev)
return 0;
if (mthca_is_memfree(mdev)) {
list_for_each_entry(fmr, fmr_list, list)
mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));
wmb();
} else
list_for_each_entry(fmr, fmr_list, list)
mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));
err = mthca_SYNC_TPT(mdev);
return err;
}
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
char *buf)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev.dev);
return sprintf(buf, "%x\n", dev->rev_id);
}
static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
char *buf)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev.dev);
return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
(int) (dev->fw_ver >> 16) & 0xffff,
(int) dev->fw_ver & 0xffff);
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
char *buf)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev.dev);
switch (dev->pdev->device) {
case PCI_DEVICE_ID_MELLANOX_TAVOR:
return sprintf(buf, "MT23108\n");
case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
case PCI_DEVICE_ID_MELLANOX_ARBEL:
return sprintf(buf, "MT25208\n");
case PCI_DEVICE_ID_MELLANOX_SINAI:
case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
return sprintf(buf, "MT25204\n");
default:
return sprintf(buf, "unknown\n");
}
}
static ssize_t show_board(struct device *device, struct device_attribute *attr,
char *buf)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev.dev);
return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *mthca_dev_attributes[] = {
&dev_attr_hw_rev,
&dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id
};
static int mthca_init_node_data(struct mthca_dev *dev)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
err = mthca_MAD_IFC(dev, 1, 1,
1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mthca_MAD_IFC(dev, 1, 1,
1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
if (mthca_is_memfree(dev))
dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
kfree(in_mad);
kfree(out_mad);
return err;
}
int mthca_register_device(struct mthca_dev *dev)
{
int ret;
int i;
ret = mthca_init_node_data(dev);
if (ret)
return ret;
strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION;
dev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_REG_MR) |
(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
dev->ib_dev.num_comp_vectors = 1;
dev->ib_dev.dma_device = &dev->pdev->dev;
dev->ib_dev.query_device = mthca_query_device;
dev->ib_dev.query_port = mthca_query_port;
dev->ib_dev.modify_device = mthca_modify_device;
dev->ib_dev.modify_port = mthca_modify_port;
dev->ib_dev.query_pkey = mthca_query_pkey;
dev->ib_dev.query_gid = mthca_query_gid;
dev->ib_dev.alloc_ucontext = mthca_alloc_ucontext;
dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext;
dev->ib_dev.mmap = mthca_mmap_uar;
dev->ib_dev.alloc_pd = mthca_alloc_pd;
dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
dev->ib_dev.create_ah = mthca_ah_create;
dev->ib_dev.query_ah = mthca_ah_query;
dev->ib_dev.destroy_ah = mthca_ah_destroy;
if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
dev->ib_dev.create_srq = mthca_create_srq;
dev->ib_dev.modify_srq = mthca_modify_srq;
dev->ib_dev.query_srq = mthca_query_srq;
dev->ib_dev.destroy_srq = mthca_destroy_srq;
dev->ib_dev.uverbs_cmd_mask |=
(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
if (mthca_is_memfree(dev))
dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
else
dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
}
dev->ib_dev.create_qp = mthca_create_qp;
dev->ib_dev.modify_qp = mthca_modify_qp;
dev->ib_dev.query_qp = mthca_query_qp;
dev->ib_dev.destroy_qp = mthca_destroy_qp;
dev->ib_dev.create_cq = mthca_create_cq;
dev->ib_dev.resize_cq = mthca_resize_cq;
dev->ib_dev.destroy_cq = mthca_destroy_cq;
dev->ib_dev.poll_cq = mthca_poll_cq;
dev->ib_dev.get_dma_mr = mthca_get_dma_mr;
dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr;
dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
dev->ib_dev.dereg_mr = mthca_dereg_mr;
if (dev->mthca_flags & MTHCA_FLAG_FMR) {
dev->ib_dev.alloc_fmr = mthca_alloc_fmr;
dev->ib_dev.unmap_fmr = mthca_unmap_fmr;
dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
if (mthca_is_memfree(dev))
dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
else
dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
}
dev->ib_dev.attach_mcast = mthca_multicast_attach;
dev->ib_dev.detach_mcast = mthca_multicast_detach;
dev->ib_dev.process_mad = mthca_process_mad;
if (mthca_is_memfree(dev)) {
dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
dev->ib_dev.post_send = mthca_arbel_post_send;
dev->ib_dev.post_recv = mthca_arbel_post_receive;
} else {
dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
dev->ib_dev.post_send = mthca_tavor_post_send;
dev->ib_dev.post_recv = mthca_tavor_post_receive;
}
mutex_init(&dev->cap_mask_mutex);
ret = ib_register_device(&dev->ib_dev, NULL);
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
ret = device_create_file(&dev->ib_dev.dev,
mthca_dev_attributes[i]);
if (ret) {
ib_unregister_device(&dev->ib_dev);
return ret;
}
}
mthca_start_catas_poll(dev);
return 0;
}
void mthca_unregister_device(struct mthca_dev *dev)
{
mthca_stop_catas_poll(dev);
ib_unregister_device(&dev->ib_dev);
}
| gpl-2.0 |
loumatrix/android_kernel_asus_me301t | drivers/platform/x86/xo15-ebook.c | 7888 | 4238 | /*
* OLPC XO-1.5 ebook switch driver
* (based on generic ACPI button driver)
*
* Copyright (C) 2009 Paul Fox <pgf@laptop.org>
* Copyright (C) 2010 One Laptop per Child
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/input.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#define MODULE_NAME "xo15-ebook"
#define XO15_EBOOK_CLASS MODULE_NAME
#define XO15_EBOOK_TYPE_UNKNOWN 0x00
#define XO15_EBOOK_NOTIFY_STATUS 0x80
#define XO15_EBOOK_SUBCLASS "ebook"
#define XO15_EBOOK_HID "XO15EBK"
#define XO15_EBOOK_DEVICE_NAME "EBook Switch"
ACPI_MODULE_NAME(MODULE_NAME);
MODULE_DESCRIPTION("OLPC XO-1.5 ebook switch driver");
MODULE_LICENSE("GPL");
static const struct acpi_device_id ebook_device_ids[] = {
{ XO15_EBOOK_HID, 0 },
{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, ebook_device_ids);
struct ebook_switch {
struct input_dev *input;
char phys[32]; /* for input device */
};
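/* Read the switch state via the EBK ACPI method and report it, inverted,
 * as the SW_TABLET_MODE input switch. */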
static int ebook_send_state(struct acpi_device *device)
{
struct ebook_switch *button = acpi_driver_data(device);
unsigned long long state;
acpi_status status;
status = acpi_evaluate_integer(device->handle, "EBK", NULL, &state);
if (ACPI_FAILURE(status))
return -EIO;
/* input layer checks if event is redundant */
input_report_switch(button->input, SW_TABLET_MODE, !state);
input_sync(button->input);
return 0;
}
static void ebook_switch_notify(struct acpi_device *device, u32 event)
{
switch (event) {
case ACPI_FIXED_HARDWARE_EVENT:
case XO15_EBOOK_NOTIFY_STATUS:
ebook_send_state(device);
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
break;
}
}
static int ebook_switch_resume(struct acpi_device *device)
{
return ebook_send_state(device);
}
static int ebook_switch_add(struct acpi_device *device)
{
struct ebook_switch *button;
struct input_dev *input;
const char *hid = acpi_device_hid(device);
char *name, *class;
int error;
button = kzalloc(sizeof(struct ebook_switch), GFP_KERNEL);
if (!button)
return -ENOMEM;
device->driver_data = button;
button->input = input = input_allocate_device();
if (!input) {
error = -ENOMEM;
goto err_free_button;
}
name = acpi_device_name(device);
class = acpi_device_class(device);
if (strcmp(hid, XO15_EBOOK_HID)) {
pr_err("Unsupported hid [%s]\n", hid);
error = -ENODEV;
goto err_free_input;
}
strcpy(name, XO15_EBOOK_DEVICE_NAME);
sprintf(class, "%s/%s", XO15_EBOOK_CLASS, XO15_EBOOK_SUBCLASS);
snprintf(button->phys, sizeof(button->phys), "%s/button/input0", hid);
input->name = name;
input->phys = button->phys;
input->id.bustype = BUS_HOST;
input->dev.parent = &device->dev;
input->evbit[0] = BIT_MASK(EV_SW);
set_bit(SW_TABLET_MODE, input->swbit);
error = input_register_device(input);
if (error)
goto err_free_input;
ebook_send_state(device);
if (device->wakeup.flags.valid) {
/* Button's GPE is run-wake GPE */
acpi_enable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number);
device_set_wakeup_enable(&device->dev, true);
}
return 0;
err_free_input:
input_free_device(input);
err_free_button:
kfree(button);
return error;
}
static int ebook_switch_remove(struct acpi_device *device, int type)
{
struct ebook_switch *button = acpi_driver_data(device);
input_unregister_device(button->input);
kfree(button);
return 0;
}
static struct acpi_driver xo15_ebook_driver = {
.name = MODULE_NAME,
.class = XO15_EBOOK_CLASS,
.ids = ebook_device_ids,
.ops = {
.add = ebook_switch_add,
.resume = ebook_switch_resume,
.remove = ebook_switch_remove,
.notify = ebook_switch_notify,
},
};
static int __init xo15_ebook_init(void)
{
return acpi_bus_register_driver(&xo15_ebook_driver);
}
static void __exit xo15_ebook_exit(void)
{
acpi_bus_unregister_driver(&xo15_ebook_driver);
}
module_init(xo15_ebook_init);
module_exit(xo15_ebook_exit);
| gpl-2.0 |
moresushant48/android_kernel_cyanogen_msm8916 | arch/sparc/kernel/prom_32.c | 9424 | 7659 | /*
* Procedures for creating, accessing and interpreting the device tree.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996-2005 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*
* Adapted for sparc32 by David S. Miller davem@davemloft.net
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include "prom.h"
void * __init prom_early_alloc(unsigned long size)
{
void *ret;
ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
if (ret != NULL)
memset(ret, 0, size);
prom_early_allocated += size;
return ret;
}
/* The following routines deal with the black magic of fully naming a
* node.
*
* Certain well known named nodes are just the simple name string.
*
* Actual devices have an address specifier appended to the base name
* string, like this "foo@addr". The "addr" can be in any number of
* formats, and the platform plus the type of the node determine the
* format and how it is constructed.
*
* For children of the ROOT node, the naming convention is fixed and
* determined by whether this is a sun4u or sun4v system.
*
* For children of other nodes, it is bus type specific. So
* we walk up the tree until we discover a "device_type" property
* we recognize and we go from there.
*/
static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom_registers *regs;
struct property *rprop;
rprop = of_find_property(dp, "reg", NULL);
if (!rprop)
return;
regs = rprop->value;
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
regs->which_io, regs->phys_addr);
}
/* "name@slot,offset" */
static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
regs->which_io,
regs->phys_addr);
}
/* "name@devnum[,func]" */
static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom_pci_registers *regs;
struct property *prop;
unsigned int devfn;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
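	/* The PCI devfn lives in bits 15:8 of phys_hi; print it as
	 * "device,function" unless the function number is zero. */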
devfn = (regs->phys_hi >> 8) & 0xff;
if (devfn & 0x07) {
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
devfn >> 3,
devfn & 0x07);
} else {
sprintf(tmp_buf, "%s@%x",
dp->name,
devfn >> 3);
}
}
/* "name@addrhi,addrlo" */
static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
regs->which_io, regs->phys_addr);
}
/* "name:vendor:device@irq,addrlo" */
static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
{
struct amba_prom_registers *regs;
unsigned int *intr, *device, *vendor, reg0;
struct property *prop;
int interrupt = 0;
/* In order to get a unique ID in the device tree (multiple AMBA devices
* may have the same name) the node number is printed
*/
prop = of_find_property(dp, "reg", NULL);
if (!prop) {
reg0 = (unsigned int)dp->phandle;
} else {
regs = prop->value;
reg0 = regs->phys_addr;
}
/* Not all cores have Interrupt */
prop = of_find_property(dp, "interrupts", NULL);
if (!prop)
intr = &interrupt; /* IRQ0 does not exist */
else
intr = prop->value;
prop = of_find_property(dp, "vendor", NULL);
if (!prop)
return;
vendor = prop->value;
prop = of_find_property(dp, "device", NULL);
if (!prop)
return;
device = prop->value;
sprintf(tmp_buf, "%s:%d:%d@%x,%x",
dp->name, *vendor, *device,
*intr, reg0);
}
static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
{
struct device_node *parent = dp->parent;
if (parent != NULL) {
if (!strcmp(parent->type, "pci") ||
!strcmp(parent->type, "pciex"))
return pci_path_component(dp, tmp_buf);
if (!strcmp(parent->type, "sbus"))
return sbus_path_component(dp, tmp_buf);
if (!strcmp(parent->type, "ebus"))
return ebus_path_component(dp, tmp_buf);
if (!strcmp(parent->type, "ambapp"))
return ambapp_path_component(dp, tmp_buf);
/* "isa" is handled with platform naming */
}
/* Use platform naming convention. */
return sparc32_path_component(dp, tmp_buf);
}
char * __init build_path_component(struct device_node *dp)
{
char tmp_buf[64], *n;
tmp_buf[0] = '\0';
__build_path_component(dp, tmp_buf);
if (tmp_buf[0] == '\0')
strcpy(tmp_buf, dp->name);
n = prom_early_alloc(strlen(tmp_buf) + 1);
strcpy(n, tmp_buf);
return n;
}
extern void restore_current(void);
void __init of_console_init(void)
{
char *msg = "OF stdout device is: %s\n";
struct device_node *dp;
unsigned long flags;
const char *type;
phandle node;
int skip, tmp, fd;
of_console_path = prom_early_alloc(256);
switch (prom_vers) {
case PROM_V0:
skip = 0;
switch (*romvec->pv_stdout) {
case PROMDEV_SCREEN:
type = "display";
break;
case PROMDEV_TTYB:
skip = 1;
/* FALLTHRU */
case PROMDEV_TTYA:
type = "serial";
break;
default:
prom_printf("Invalid PROM_V0 stdout value %u\n",
*romvec->pv_stdout);
prom_halt();
}
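		/* Select the skip'th node of the matching device type, so
		 * that ttyb picks the second serial node. */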
tmp = skip;
for_each_node_by_type(dp, type) {
if (!tmp--)
break;
}
if (!dp) {
prom_printf("Cannot find PROM_V0 console node.\n");
prom_halt();
}
of_console_device = dp;
strcpy(of_console_path, dp->full_name);
if (!strcmp(type, "serial")) {
strcat(of_console_path,
(skip ? ":b" : ":a"));
}
break;
default:
case PROM_V2:
case PROM_V3:
fd = *romvec->pv_v2bootargs.fd_stdout;
spin_lock_irqsave(&prom_lock, flags);
node = (*romvec->pv_v2devops.v2_inst2pkg)(fd);
restore_current();
spin_unlock_irqrestore(&prom_lock, flags);
if (!node) {
prom_printf("Cannot resolve stdout node from "
"instance %08x.\n", fd);
prom_halt();
}
dp = of_find_node_by_phandle(node);
type = of_get_property(dp, "device_type", NULL);
if (!type) {
prom_printf("Console stdout lacks "
"device_type property.\n");
prom_halt();
}
if (strcmp(type, "display") && strcmp(type, "serial")) {
prom_printf("Console device_type is neither display "
"nor serial.\n");
prom_halt();
}
of_console_device = dp;
if (prom_vers == PROM_V2) {
strcpy(of_console_path, dp->full_name);
switch (*romvec->pv_stdout) {
case PROMDEV_TTYA:
strcat(of_console_path, ":a");
break;
case PROMDEV_TTYB:
strcat(of_console_path, ":b");
break;
}
} else {
const char *path;
dp = of_find_node_by_path("/");
path = of_get_property(dp, "stdout-path", NULL);
if (!path) {
prom_printf("No stdout-path in root node.\n");
prom_halt();
}
strcpy(of_console_path, path);
}
break;
}
of_console_options = strrchr(of_console_path, ':');
if (of_console_options) {
of_console_options++;
if (*of_console_options == '\0')
of_console_options = NULL;
}
printk(msg, of_console_path);
}
void __init of_fill_in_cpu_data(void)
{
}
void __init irq_trans_init(struct device_node *dp)
{
}
| gpl-2.0 |
crpalmer/m7-google | arch/cris/arch-v10/mm/init.c | 11472 | 9119 | /*
* linux/arch/cris/arch-v10/mm/init.c
*
*/
#include <linux/mmzone.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <arch/svinto.h>
extern void tlb_init(void);
/*
* The kernel is already mapped with a kernel segment at kseg_c so
* we don't need to map it with a page table. However head.S also
* temporarily mapped it at kseg_4 so we should set up the ksegs again,
* clear the TLB and do some other paging setup stuff.
*/
void __init
paging_init(void)
{
int i;
unsigned long zones_size[MAX_NR_ZONES];
printk("Setting up paging and the MMU.\n");
/* clear out the init_mm.pgd that will contain the kernel's mappings */
for(i = 0; i < PTRS_PER_PGD; i++)
swapper_pg_dir[i] = __pgd(0);
/* make sure the current pgd table points to something sane
* (even if it is most probably not used until the next
* switch_mm)
*/
per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
/* initialise the TLB (tlb.c) */
tlb_init();
/* see README.mm for details on the KSEG setup */
#ifdef CONFIG_CRIS_LOW_MAP
/* Etrax-100 LX version 1 has a bug so that we cannot map anything
* across the 0x80000000 boundary, so we need to shrink the user-virtual
* area to 0x50000000 instead of 0xb0000000 and map things slightly
* different. The unused areas are marked as paged so that we can catch
* freak kernel accesses there.
*
* The ARTPEC chip is mapped at 0xa so we pass that segment straight
* through. We cannot vremap it because the vmalloc area is below 0x8
* and Juliette needs an uncached area above 0x8.
*
* Same thing with 0xc and 0x9, which is memory-mapped I/O on some boards.
* We map them straight over in LOW_MAP, but use vremap in LX version 2.
*/
#define CACHED_BOOTROM (KSEG_F | 0x08000000UL)
*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg ) | /* bootrom */
IO_STATE(R_MMU_KSEG, seg_e, page ) |
IO_STATE(R_MMU_KSEG, seg_d, page ) |
IO_STATE(R_MMU_KSEG, seg_c, page ) |
IO_STATE(R_MMU_KSEG, seg_b, seg ) | /* kernel reg area */
#ifdef CONFIG_JULIETTE
IO_STATE(R_MMU_KSEG, seg_a, seg ) | /* ARTPEC etc. */
#else
IO_STATE(R_MMU_KSEG, seg_a, page ) |
#endif
IO_STATE(R_MMU_KSEG, seg_9, seg ) | /* LED's on some boards */
IO_STATE(R_MMU_KSEG, seg_8, seg ) | /* CSE0/1, flash and I/O */
IO_STATE(R_MMU_KSEG, seg_7, page ) | /* kernel vmalloc area */
IO_STATE(R_MMU_KSEG, seg_6, seg ) | /* kernel DRAM area */
IO_STATE(R_MMU_KSEG, seg_5, seg ) | /* cached flash */
IO_STATE(R_MMU_KSEG, seg_4, page ) | /* user area */
IO_STATE(R_MMU_KSEG, seg_3, page ) | /* user area */
IO_STATE(R_MMU_KSEG, seg_2, page ) | /* user area */
IO_STATE(R_MMU_KSEG, seg_1, page ) | /* user area */
IO_STATE(R_MMU_KSEG, seg_0, page ) ); /* user area */
*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x3 ) |
IO_FIELD(R_MMU_KBASE_HI, base_e, 0x0 ) |
IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) |
IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
#ifdef CONFIG_JULIETTE
IO_FIELD(R_MMU_KBASE_HI, base_a, 0xa ) |
#else
IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
#endif
IO_FIELD(R_MMU_KBASE_HI, base_9, 0x9 ) |
IO_FIELD(R_MMU_KBASE_HI, base_8, 0x8 ) );
*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) |
IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#else
/* This code is for the corrected Etrax-100 LX version 2... */
#define CACHED_BOOTROM (KSEG_A | 0x08000000UL)
*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg ) | /* cached flash */
IO_STATE(R_MMU_KSEG, seg_e, seg ) | /* uncached flash */
IO_STATE(R_MMU_KSEG, seg_d, page ) | /* vmalloc area */
IO_STATE(R_MMU_KSEG, seg_c, seg ) | /* kernel area */
IO_STATE(R_MMU_KSEG, seg_b, seg ) | /* kernel reg area */
IO_STATE(R_MMU_KSEG, seg_a, seg ) | /* bootrom */
IO_STATE(R_MMU_KSEG, seg_9, page ) | /* user area */
IO_STATE(R_MMU_KSEG, seg_8, page ) |
IO_STATE(R_MMU_KSEG, seg_7, page ) |
IO_STATE(R_MMU_KSEG, seg_6, page ) |
IO_STATE(R_MMU_KSEG, seg_5, page ) |
IO_STATE(R_MMU_KSEG, seg_4, page ) |
IO_STATE(R_MMU_KSEG, seg_3, page ) |
IO_STATE(R_MMU_KSEG, seg_2, page ) |
IO_STATE(R_MMU_KSEG, seg_1, page ) |
IO_STATE(R_MMU_KSEG, seg_0, page ) );
*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) |
IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) |
IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
IO_FIELD(R_MMU_KBASE_HI, base_c, 0x4 ) |
IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
IO_FIELD(R_MMU_KBASE_HI, base_a, 0x3 ) |
IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) |
IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) );
*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_6, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#endif
*R_MMU_CONTEXT = ( IO_FIELD(R_MMU_CONTEXT, page_id, 0 ) );
/* The MMU has been enabled ever since head.S but just to make
* it totally obvious we do it here as well.
*/
*R_MMU_CTRL = ( IO_STATE(R_MMU_CTRL, inv_excp, enable ) |
IO_STATE(R_MMU_CTRL, acc_excp, enable ) |
IO_STATE(R_MMU_CTRL, we_excp, enable ) );
*R_MMU_ENABLE = IO_STATE(R_MMU_ENABLE, mmu_enable, enable);
/*
* initialize the bad page table and bad page to point
* to a couple of allocated pages
*/
empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
memset((void *)empty_zero_page, 0, PAGE_SIZE);
/* All pages are DMA'able in Etrax, so put all in the DMA'able zone */
zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
for (i = 1; i < MAX_NR_ZONES; i++)
zones_size[i] = 0;
/* Use free_area_init_node instead of free_area_init, because the former
* is designed for systems where the DRAM starts at an address substantially
* higher than 0, like us (we start at PAGE_OFFSET). This saves space in the
* mem_map page array.
*/
free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
}
/* Initialize remaps of some I/O-ports. It is important that this
* is called before any driver is initialized.
*/
static int
__init init_ioremap(void)
{
/* Give the external I/O-port addresses their values */
#ifdef CONFIG_CRIS_LOW_MAP
/* Simply a linear map (see the KSEG map above in paging_init) */
port_cse1_addr = (volatile unsigned long *)(MEM_CSE1_START |
MEM_NON_CACHEABLE);
port_csp0_addr = (volatile unsigned long *)(MEM_CSP0_START |
MEM_NON_CACHEABLE);
port_csp4_addr = (volatile unsigned long *)(MEM_CSP4_START |
MEM_NON_CACHEABLE);
#else
/* Note that nothing blows up just because we do this remapping
* it's ok even if the ports are not used or connected
* to anything (or connected to a non-I/O thing) */
port_cse1_addr = (volatile unsigned long *)
ioremap((unsigned long)(MEM_CSE1_START | MEM_NON_CACHEABLE), 16);
port_csp0_addr = (volatile unsigned long *)
ioremap((unsigned long)(MEM_CSP0_START | MEM_NON_CACHEABLE), 16);
port_csp4_addr = (volatile unsigned long *)
ioremap((unsigned long)(MEM_CSP4_START | MEM_NON_CACHEABLE), 16);
#endif
return 0;
}
__initcall(init_ioremap);
/* Helper function for the two below */
static inline void
flush_etrax_cacherange(void *startadr, int length)
{
/* CACHED_BOOTROM is mapped to the boot-rom area (cached) which
* we can use to get fast dummy-reads of cachelines
*/
volatile short *flushadr = (volatile short *)(((unsigned long)startadr & ~PAGE_MASK) |
CACHED_BOOTROM);
length = length > 8192 ? 8192 : length; /* No need to flush more than cache size */
while(length > 0) {
*flushadr; /* dummy read to flush */
flushadr += (32/sizeof(short)); /* a cacheline is 32 bytes */
length -= 32;
}
}
/* Due to a bug in Etrax100(LX) all versions, receiving DMA buffers
* will occasionally corrupt certain CPU writes if the DMA buffers
* happen to be hot in the cache.
*
* As a workaround, we have to flush the relevant parts of the cache
* before (re) inserting any receiving descriptor into the DMA HW.
*/
void
prepare_rx_descriptor(struct etrax_dma_descr *desc)
{
flush_etrax_cacherange((void *)desc->buf, desc->sw_len ? desc->sw_len : 65536);
}
/* Do the same thing but flush the entire cache */
void
flush_etrax_cache(void)
{
flush_etrax_cacherange(0, 8192);
}
| gpl-2.0 |
visi0nary/mt6735-kernel-3.10.61 | drivers/misc/mediatek/connectivity/combo/drv_wlan/mt6628/wlan/nic/nic_pwr_mgt.c | 209 | 25151 | /*
** $Id: //Department/DaVinci/BRANCHES/MT6620_WIFI_DRIVER_V2_3/nic/nic_pwr_mgt.c#1 $
*/
/*! \file "nic_pwr_mgt.c"
\brief In this file we define the STATE and EVENT for Power Management FSM.
    The SCAN FSM is responsible for performing SCAN behavior when the Arbiter enters
    ARB_STATE_SCAN. The STATE and EVENT for the SCAN FSM are defined here with a
    detailed description.
*/
/*
** $Log: nic_pwr_mgt.c $
*
* 11 28 2011 cp.wu
* [WCXRP00001125] [MT6620 Wi-Fi][Firmware] Strengthen Wi-Fi power off sequence to have a clearroom environment when returining to ROM code
* 1. Due to firmware now stops HIF DMA for powering off, do not try to receive any packet from firmware
* 2. Take use of prAdapter->fgIsEnterD3ReqIssued for tracking whether it is powering off or not
*
* 10 03 2011 cp.wu
* [WCXRP00001022] [MT6628 Driver][Firmware Download] Add multi section independent download functionality
* add firmware download path in divided scatters.
*
* 08 15 2011 cp.wu
* [WCXRP00000851] [MT6628 Wi-Fi][Driver] Add HIFSYS related definition to driver source tree
* reuse firmware download logic of MT6620 for MT6628.
*
* 05 11 2011 cp.wu
* [WCXRP00000718] [MT6620 Wi-Fi] modify the behavior of setting tx power
* ACPI APIs migrate to wlan_lib.c for glue layer to invoke.
*
* 04 29 2011 cp.wu
* [WCXRP00000636] [WHQL][MT5931 Driver] 2c_PMHibernate (hang on 2h)
* fix for compilation error when applied with FW_DOWNLOAD = 0
*
* 04 18 2011 cp.wu
* [WCXRP00000636] [WHQL][MT5931 Driver] 2c_PMHibernate (hang on 2h)
* 1) add API for glue layer to query ACPI state
* 2) Windows glue should not access to hardware after switched into D3 state
*
* 04 13 2011 cp.wu
* [WCXRP00000639] [WHQL][MT5931 Driver] 2c_PMStandby test item can not complete
* refine for MT5931/MT6620 logic separation.
*
* 04 13 2011 cp.wu
* [WCXRP00000639] [WHQL][MT5931 Driver] 2c_PMStandby test item can not complete
* bugfix: firmware download procedure for ACPI state transition is not complete.
*
* 03 15 2011 cp.wu
* [WCXRP00000559] [MT6620 Wi-Fi][Driver] Combine TX/RX DMA buffers into a single one to reduce physically continuous memory consumption
* 1. deprecate CFG_HANDLE_IST_IN_SDIO_CALLBACK
* 2. Use common coalescing buffer for both TX/RX directions
*
*
* 03 07 2011 terry.wu
* [WCXRP00000521] [MT6620 Wi-Fi][Driver] Remove non-standard debug message
* Toggle non-standard debug messages to comments.
*
* 12 31 2010 cp.wu
* [WCXRP00000335] [MT6620 Wi-Fi][Driver] change to use milliseconds sleep instead of delay to avoid blocking to system scheduling
* change to use msleep() and shorten waiting interval to reduce blocking to other task while Wi-Fi driver is being loaded
*
* 12 31 2010 cp.wu
* [WCXRP00000327] [MT6620 Wi-Fi][Driver] Improve HEC WHQA 6972 workaround coverage in driver side
* check success or failure for setting fw-own
*
* 12 30 2010 cp.wu
* [WCXRP00000327] [MT6620 Wi-Fi][Driver] Improve HEC WHQA 6972 workaround coverage in driver side
* host driver not to set FW-own when there is still pending interrupts
*
* 10 07 2010 cp.wu
* [WCXRP00000083] [MT5931][Driver][FW] Add necessary logic for MT5931 first connection
* add firmware download for MT5931.
*
* 09 21 2010 cp.wu
* [WCXRP00000053] [MT6620 Wi-Fi][Driver] Reset incomplete and might leads to BSOD when entering RF test with AIS associated
* Do a complete reset with STA-REC null checking for RF test re-entry
*
* 08 30 2010 cp.wu
* NULL
* eliminate klockwork errors
*
* 08 30 2010 cp.wu
* NULL
* reset ACPI power state before waking up MT6620 Wi-Fi firmware.
*
* 08 12 2010 cp.wu
* NULL
* [AIS-FSM] honor registry setting for adhoc running mode. (A/B/G)
*
* 08 03 2010 cp.wu
* NULL
* Centralize mgmt/system service procedures into independent calls.
*
* 07 22 2010 cp.wu
*
* 1) refine AIS-FSM indent.
* 2) when entering RF Test mode, flush 802.1X frames as well
* 3) when entering D3 state, flush 802.1X frames as well
*
* 07 08 2010 cp.wu
*
* [WPD00003833] [MT6620 and MT5931] Driver migration - move to new repository.
*
* 06 21 2010 cp.wu
* [WPD00003833][MT6620 and MT5931] Driver migration
* change MAC address updating logic.
*
* 06 06 2010 kevin.huang
* [WPD00003832][MT6620 5931] Create driver base
* [MT6620 5931] Create driver base
*
* 05 24 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* 1) when acquiring LP-own, write for clr-own with lower frequency compared to read poll
* 2) correct address list parsing
*
* 05 22 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* sleepy notify is only used for sleepy state,
* while wake-up state is automatically set when host needs to access device
*
* 05 19 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* correct hibernation problem.
*
* 04 26 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* 1) surpress compiler warning
* 2) when acqruing LP-own, keep writing WHLPCR whenever OWN is not acquired yet
*
* 04 23 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* surpress compiler warning
*
* 04 22 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* when acquiring driver-own, wait for up to 8 seconds.
*
* 04 21 2010 jeffrey.chang
* [WPD00003826]Initial import for Linux port
* add for private ioctl support
*
* 04 20 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* 1) remove redundant firmware image unloading
* * 2) use compile-time macros to separate logic related to accquiring own
*
* 04 16 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* treat BUS access failure as kind of card removal.
*
* 04 07 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* accessing to firmware load/start address, and access to OID handling information
* * are now handled in glue layer
*
* 04 06 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* ePowerCtrl is not necessary as a glue variable.
*
* 04 06 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* add KAL API: kalFlushPendingTxPackets(), and take use of the API
*
* 04 06 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* eliminate direct access for prGlueInfo->fgIsCardRemoved in non-glue layer
*
* 03 24 2010 jeffrey.chang
* [WPD00003826]Initial import for Linux port
* initial import for Linux port
*
* 03 22 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* always send CMD_NIC_POWER_CTRL packet when nic is being halted
*
* 03 19 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* correct typo.
*
* 03 19 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* 1) add ACPI D0/D3 state switching support
* * * * * * * * 2) use more formal way to handle interrupt when the status is retrieved from enhanced RX response
*
* 03 08 2010 cp.wu
* [WPD00001943]Create WiFi test driver framework on WinXP
* 1) add another spin-lock to protect MsduInfoList due to it might be accessed by different thread.
* * 2) change own-back acquiring procedure to wait for up to 16.67 seconds
** \main\maintrunk.MT6620WiFiDriver_Prj\6 2009-10-13 21:59:15 GMT mtk01084
** update for new HW design
** \main\maintrunk.MT6620WiFiDriver_Prj\5 2009-09-09 17:26:36 GMT mtk01084
** remove CMD52 access
** \main\maintrunk.MT6620WiFiDriver_Prj\4 2009-05-18 14:50:29 GMT mtk01084
** modify lines in nicpmSetDriverOwn()
** \main\maintrunk.MT6620WiFiDriver_Prj\3 2009-03-23 16:55:37 GMT mtk01084
** modify nicpmSetDriverOwn()
** \main\maintrunk.MT6620WiFiDriver_Prj\2 2009-03-19 18:33:00 GMT mtk01084
** update for basic power management functions
** \main\maintrunk.MT6620WiFiDriver_Prj\1 2009-03-19 15:05:32 GMT mtk01084
** Initial version
**
*/
/*******************************************************************************
* C O M P I L E R F L A G S
********************************************************************************
*/
/*******************************************************************************
* E X T E R N A L R E F E R E N C E S
********************************************************************************
*/
#include "precomp.h"
/*******************************************************************************
* C O N S T A N T S
********************************************************************************
*/
/*******************************************************************************
* D A T A T Y P E S
********************************************************************************
*/
/*******************************************************************************
* P U B L I C D A T A
********************************************************************************
*/
extern BOOLEAN fgIsResetting;
/*******************************************************************************
* P R I V A T E D A T A
********************************************************************************
*/
/*******************************************************************************
* M A C R O S
********************************************************************************
*/
/*******************************************************************************
* F U N C T I O N D E C L A R A T I O N S
********************************************************************************
*/
/*******************************************************************************
* F U N C T I O N S
********************************************************************************
*/
/*----------------------------------------------------------------------------*/
/*!
* \brief This routine returns ownership to the firmware (sets FW OWN) so the
*        chip is allowed to enter its sleep state.
*
* \param[in] prAdapter         Pointer to the Adapter structure.
* \param[in] fgEnableGlobalInt TRUE to defer setting FW OWN until the global
*                              interrupt is re-enabled.
*
* \return (none)
*/
/*----------------------------------------------------------------------------*/
VOID nicpmSetFWOwn(IN P_ADAPTER_T prAdapter, IN BOOLEAN fgEnableGlobalInt)
{
UINT_32 u4RegValue;
ASSERT(prAdapter);
if (prAdapter->fgIsFwOwn == TRUE) {
return;
} else {
if (nicProcessIST(prAdapter) != WLAN_STATUS_NOT_INDICATING) {
/* pending interrupts */
return;
}
}
if (fgEnableGlobalInt) {
prAdapter->fgIsIntEnableWithLPOwnSet = TRUE;
} else {
HAL_MCR_WR(prAdapter, MCR_WHLPCR, WHLPCR_FW_OWN_REQ_SET);
HAL_MCR_RD(prAdapter, MCR_WHLPCR, &u4RegValue);
if (u4RegValue & WHLPCR_FW_OWN_REQ_SET) {
			/* Setting FW own was not successful (possibly because of */
			/* pending interrupts); indicate an own-clear event instead. */
HAL_MCR_WR(prAdapter, MCR_WHLPCR, WHLPCR_FW_OWN_REQ_CLR);
return;
}
prAdapter->fgIsFwOwn = TRUE;
}
}
/*----------------------------------------------------------------------------*/
/*!
* \brief This routine acquires driver ownership (LP OWN) back from the firmware.
*
* \param[in] prAdapter Pointer to the Adapter structure.
*
* \return TRUE if driver ownership was acquired, FALSE otherwise
*/
/*----------------------------------------------------------------------------*/
BOOLEAN nicpmSetDriverOwn(IN P_ADAPTER_T prAdapter)
{
#define LP_OWN_BACK_TOTAL_DELAY_MS 8192 /* power of 2 */
#define LP_OWN_BACK_LOOP_DELAY_MS 1 /* power of 2 */
#define LP_OWN_BACK_CLR_OWN_ITERATION 256 /* power of 2 */
BOOLEAN fgStatus = TRUE;
UINT_32 i, u4CurrTick, u4RegValue = 0;
ASSERT(prAdapter);
if (prAdapter->fgIsFwOwn == FALSE)
return fgStatus;
u4CurrTick = kalGetTimeTick();
i = 0;
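	/* Poll for LP ownership, re-issuing the clear-own request every
	 * LP_OWN_BACK_CLR_OWN_ITERATION iterations, and give up on card
	 * removal, bus access failure, chip reset or overall timeout. */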
while (1) {
HAL_MCR_RD(prAdapter, MCR_WHLPCR, &u4RegValue);
if (u4RegValue & WHLPCR_FW_OWN_REQ_SET) {
prAdapter->fgIsFwOwn = FALSE;
break;
} else if (kalIsCardRemoved(prAdapter->prGlueInfo) == TRUE
|| fgIsBusAccessFailed == TRUE
|| (kalGetTimeTick() - u4CurrTick) > LP_OWN_BACK_TOTAL_DELAY_MS
|| fgIsResetting == TRUE) {
/* ERRORLOG(("LP cannot be own back (for %ld ms)", kalGetTimeTick() - u4CurrTick)); */
fgStatus = FALSE;
break;
} else {
if ((i & (LP_OWN_BACK_CLR_OWN_ITERATION - 1)) == 0) {
/* Software get LP ownership - per 256 iterations */
HAL_MCR_WR(prAdapter, MCR_WHLPCR, WHLPCR_FW_OWN_REQ_CLR);
}
/* Delay for LP engine to complete its operation. */
kalMsleep(LP_OWN_BACK_LOOP_DELAY_MS);
i++;
}
}
return fgStatus;
}
/*----------------------------------------------------------------------------*/
/*!
* \brief This routine is used to set ACPI power mode to D0.
*
* \param[in] prAdapter Pointer to the Adapter structure.
*
* \return TRUE on success, FALSE on failure
*/
/*----------------------------------------------------------------------------*/
BOOLEAN nicpmSetAcpiPowerD0(IN P_ADAPTER_T prAdapter)
{
WLAN_STATUS u4Status = WLAN_STATUS_SUCCESS;
UINT_32 u4Value = 0, u4WHISR = 0;
UINT_8 aucTxCount[8];
UINT_32 i;
#if CFG_ENABLE_FW_DOWNLOAD
UINT_32 u4FwImgLength, u4FwLoadAddr, u4ImgSecSize;
PVOID prFwMappingHandle;
PVOID pvFwImageMapFile = NULL;
#if CFG_ENABLE_FW_DIVIDED_DOWNLOAD
UINT_32 j;
P_FIRMWARE_DIVIDED_DOWNLOAD_T prFwHead;
BOOLEAN fgValidHead;
const UINT_32 u4CRCOffset = offsetof(FIRMWARE_DIVIDED_DOWNLOAD_T, u4NumOfEntries);
#endif
#endif
DEBUGFUNC("nicpmSetAcpiPowerD0");
ASSERT(prAdapter);
do {
/* 0. Reset variables in ADAPTER_T */
prAdapter->fgIsFwOwn = TRUE;
prAdapter->fgWiFiInSleepyState = FALSE;
prAdapter->rAcpiState = ACPI_STATE_D0;
prAdapter->fgIsEnterD3ReqIssued = FALSE;
#if defined(MT6620) || defined(MT6628)
/* 1. Request Ownership to enter F/W download state */
ACQUIRE_POWER_CONTROL_FROM_PM(prAdapter);
#if !CFG_ENABLE_FULL_PM
nicpmSetDriverOwn(prAdapter);
#endif
/* 2. Initialize the Adapter */
if ((u4Status = nicInitializeAdapter(prAdapter)) != WLAN_STATUS_SUCCESS) {
DBGLOG(INIT, ERROR, ("nicInitializeAdapter failed!\n"));
u4Status = WLAN_STATUS_FAILURE;
break;
}
#endif
#if CFG_ENABLE_FW_DOWNLOAD
prFwMappingHandle =
kalFirmwareImageMapping(prAdapter->prGlueInfo, &pvFwImageMapFile,
&u4FwImgLength);
if (!prFwMappingHandle) {
DBGLOG(INIT, ERROR, ("Fail to load FW image from file!\n"));
pvFwImageMapFile = NULL;
}
#if defined(MT6620) || defined(MT6628)
if (pvFwImageMapFile) {
/* 3.1 disable interrupt, download is done by polling mode only */
nicDisableInterrupt(prAdapter);
/* 3.2 Initialize Tx Resource to fw download state */
nicTxInitResetResource(prAdapter);
/* 3.3 FW download here */
u4FwLoadAddr = kalGetFwLoadAddress(prAdapter->prGlueInfo);
#if CFG_ENABLE_FW_DIVIDED_DOWNLOAD
/* 3a. parse file header for decision of divided firmware download or not */
prFwHead = (P_FIRMWARE_DIVIDED_DOWNLOAD_T) pvFwImageMapFile;
if (prFwHead->u4Signature == MTK_WIFI_SIGNATURE &&
prFwHead->u4CRC == wlanCRC32((PUINT_8) pvFwImageMapFile + u4CRCOffset,
u4FwImgLength - u4CRCOffset)) {
fgValidHead = TRUE;
} else {
fgValidHead = FALSE;
}
/* 3b. engage divided firmware downloading */
if (fgValidHead == TRUE) {
for (i = 0; i < prFwHead->u4NumOfEntries; i++) {
#if CFG_ENABLE_FW_DOWNLOAD_AGGREGATION
if (wlanImageSectionDownloadAggregated(prAdapter,
prFwHead->
arSection[i].
u4DestAddr,
prFwHead->
arSection[i].
u4Length,
(PUINT_8)
pvFwImageMapFile +
prFwHead->
arSection[i].
u4Offset) !=
WLAN_STATUS_SUCCESS) {
DBGLOG(INIT, ERROR,
("Firmware scatter download failed!\n"));
u4Status = WLAN_STATUS_FAILURE;
}
#else
for (j = 0; j < prFwHead->arSection[i].u4Length;
j += CMD_PKT_SIZE_FOR_IMAGE) {
if (j + CMD_PKT_SIZE_FOR_IMAGE <
prFwHead->arSection[i].u4Length)
u4ImgSecSize = CMD_PKT_SIZE_FOR_IMAGE;
else
u4ImgSecSize =
prFwHead->arSection[i].u4Length - j;
if (wlanImageSectionDownload(prAdapter,
prFwHead->arSection[i].
u4DestAddr + j,
u4ImgSecSize,
(PUINT_8)
pvFwImageMapFile +
prFwHead->arSection[i].
u4Offset + j) !=
WLAN_STATUS_SUCCESS) {
DBGLOG(INIT, ERROR,
("Firmware scatter download failed!\n"));
u4Status = WLAN_STATUS_FAILURE;
break;
}
}
#endif
/* escape from loop if any pending error occurs */
if (u4Status == WLAN_STATUS_FAILURE) {
break;
}
}
} else
#endif
#if CFG_ENABLE_FW_DOWNLOAD_AGGREGATION
if (wlanImageSectionDownloadAggregated(prAdapter,
u4FwLoadAddr,
u4FwImgLength,
(PUINT_8)
pvFwImageMapFile) !=
WLAN_STATUS_SUCCESS) {
DBGLOG(INIT, ERROR, ("Firmware scatter download failed!\n"));
u4Status = WLAN_STATUS_FAILURE;
}
#else
for (i = 0; i < u4FwImgLength; i += CMD_PKT_SIZE_FOR_IMAGE) {
if (i + CMD_PKT_SIZE_FOR_IMAGE < u4FwImgLength)
u4ImgSecSize = CMD_PKT_SIZE_FOR_IMAGE;
else
u4ImgSecSize = u4FwImgLength - i;
if (wlanImageSectionDownload(prAdapter,
u4FwLoadAddr + i,
u4ImgSecSize,
(PUINT_8) pvFwImageMapFile +
i) != WLAN_STATUS_SUCCESS) {
DBGLOG(INIT, ERROR,
("wlanImageSectionDownload failed!\n"));
u4Status = WLAN_STATUS_FAILURE;
break;
}
}
#endif
if (u4Status != WLAN_STATUS_SUCCESS) {
kalFirmwareImageUnmapping(prAdapter->prGlueInfo, prFwMappingHandle,
pvFwImageMapFile);
break;
}
#if !CFG_ENABLE_FW_DOWNLOAD_ACK
/* Send INIT_CMD_ID_QUERY_PENDING_ERROR command and wait for response */
if (wlanImageQueryStatus(prAdapter) != WLAN_STATUS_SUCCESS) {
kalFirmwareImageUnmapping(prAdapter->prGlueInfo, prFwMappingHandle,
pvFwImageMapFile);
u4Status = WLAN_STATUS_FAILURE;
break;
}
#endif
kalFirmwareImageUnmapping(prAdapter->prGlueInfo, prFwMappingHandle,
pvFwImageMapFile);
} else {
u4Status = WLAN_STATUS_FAILURE;
break;
}
/* 4. send Wi-Fi Start command */
#if CFG_OVERRIDE_FW_START_ADDRESS
wlanConfigWifiFunc(prAdapter, TRUE, kalGetFwStartAddress(prAdapter->prGlueInfo));
#else
wlanConfigWifiFunc(prAdapter, FALSE, 0);
#endif
#elif defined(MT5931)
if (pvFwImageMapFile) {
DBGLOG(INIT, TRACE,
("Download Address: 0x%08X\n",
kalGetFwLoadAddress(prAdapter->prGlueInfo)));
DBGLOG(INIT, TRACE, ("Firmware Length: 0x%08X\n", u4FwImgLength));
do {
/* 1.0 whole-chip reset except HIFSYS */
HAL_MCR_WR(prAdapter, MCR_WMCSR, WMCSR_CHIP_RST);
HAL_MCR_WR(prAdapter, MCR_WMCSR, 0);
/* 1.1 wait for INIT_RDY */
i = 0;
while (1) {
HAL_MCR_RD(prAdapter, MCR_WMCSR, &u4Value);
if (u4Value & WMCSR_INI_RDY) {
DBGLOG(INIT, TRACE, ("INIT-RDY detected\n"));
break;
} else if (kalIsCardRemoved(prAdapter->prGlueInfo) == TRUE
|| fgIsBusAccessFailed == TRUE) {
u4Status = WLAN_STATUS_FAILURE;
break;
} else if (i >= CFG_RESPONSE_POLLING_TIMEOUT) {
DBGLOG(INIT, ERROR,
("Waiting for Init Ready bit: Timeout\n"));
u4Status = WLAN_STATUS_FAILURE;
break;
} else {
i++;
kalMsleep(10);
}
}
/* 1.2 set KSEL/FLEN */
HAL_MCR_WR(prAdapter, MCR_FWCFG, u4FwImgLength >> 6);
/* 1.3 enable FWDL_EN */
HAL_MCR_WR(prAdapter, MCR_WMCSR, WMCSR_FWDLEN);
/* 1.4 wait for PLL_RDY */
i = 0;
while (1) {
HAL_MCR_RD(prAdapter, MCR_WMCSR, &u4Value);
if (u4Value & WMCSR_PLLRDY) {
DBGLOG(INIT, TRACE, ("PLL-RDY detected\n"));
break;
} else if (kalIsCardRemoved(prAdapter->prGlueInfo) == TRUE
|| fgIsBusAccessFailed == TRUE) {
u4Status = WLAN_STATUS_FAILURE;
break;
} else if (i >= CFG_RESPONSE_POLLING_TIMEOUT) {
DBGLOG(INIT, ERROR,
("Waiting for PLL Ready bit: Timeout\n"));
u4Status = WLAN_STATUS_FAILURE;
break;
} else {
i++;
kalMsleep(10);
}
}
/* 2.1 turn on HIFSYS firmware download mode */
HAL_MCR_WR(prAdapter, MCR_FWDLSR, FWDLSR_FWDL_MODE);
/* 2.2 set starting address */
u4FwLoadAddr = kalGetFwLoadAddress(prAdapter->prGlueInfo);
HAL_MCR_WR(prAdapter, MCR_FWDLDSAR, u4FwLoadAddr);
/* 3. upload firmware */
for (i = 0; i < u4FwImgLength; i += CMD_PKT_SIZE_FOR_IMAGE) {
if (i + CMD_PKT_SIZE_FOR_IMAGE < u4FwImgLength)
u4ImgSecSize = CMD_PKT_SIZE_FOR_IMAGE;
else
u4ImgSecSize = u4FwImgLength - i;
if (wlanImageSectionDownload(prAdapter,
u4FwLoadAddr + i,
u4ImgSecSize,
(PUINT_8) pvFwImageMapFile +
i) != WLAN_STATUS_SUCCESS) {
DBGLOG(INIT, ERROR,
("Firmware scatter download failed!\n"));
u4Status = WLAN_STATUS_FAILURE;
break;
}
}
/* 4.1 poll FWDL_OK & FWDL_FAIL bits */
i = 0;
while (1) {
HAL_MCR_RD(prAdapter, MCR_WMCSR, &u4Value);
if (u4Value & WMCSR_DL_OK) {
DBGLOG(INIT, TRACE, ("DL_OK detected\n"));
break;
} else if (kalIsCardRemoved(prAdapter->prGlueInfo) == TRUE
|| fgIsBusAccessFailed == TRUE
|| (u4Value & WMCSR_DL_FAIL)) {
DBGLOG(INIT, ERROR,
("DL_FAIL detected: 0x%08X\n", u4Value));
u4Status = WLAN_STATUS_FAILURE;
break;
} else if (i >= CFG_RESPONSE_POLLING_TIMEOUT) {
DBGLOG(INIT, ERROR,
("Waiting for DL_OK/DL_FAIL bit: Timeout\n"));
u4Status = WLAN_STATUS_FAILURE;
break;
} else {
i++;
kalMsleep(10);
}
}
/* 4.2 turn off HIFSYS download mode */
HAL_MCR_WR(prAdapter, MCR_FWDLSR, 0);
} while (FALSE);
} else {
DBGLOG(INIT, ERROR, ("No Firmware found!\n"));
u4Status = WLAN_STATUS_FAILURE;
break;
}
#endif
#endif
		/* 5. check that Wi-Fi FW has asserted the ready bit */
DBGLOG(INIT, TRACE, ("wlanAdapterStart(): Waiting for Ready bit..\n"));
i = 0;
while (1) {
HAL_MCR_RD(prAdapter, MCR_WCIR, &u4Value);
if (u4Value & WCIR_WLAN_READY) {
DBGLOG(INIT, TRACE, ("Ready bit asserted\n"));
break;
} else if (kalIsCardRemoved(prAdapter->prGlueInfo) == TRUE
|| fgIsBusAccessFailed == TRUE) {
u4Status = WLAN_STATUS_FAILURE;
break;
} else if (i >= CFG_RESPONSE_POLLING_TIMEOUT) {
DBGLOG(INIT, ERROR, ("Waiting for Ready bit: Timeout\n"));
u4Status = WLAN_STATUS_FAILURE;
break;
} else {
i++;
kalMsleep(10);
}
}
#if defined(MT5931)
/* Acquire LP-OWN */
ACQUIRE_POWER_CONTROL_FROM_PM(prAdapter);
#if !CFG_ENABLE_FULL_PM
nicpmSetDriverOwn(prAdapter);
#endif
/* 2. Initialize the Adapter */
if ((u4Status = nicInitializeAdapter(prAdapter)) != WLAN_STATUS_SUCCESS) {
DBGLOG(INIT, ERROR, ("nicInitializeAdapter failed!\n"));
u4Status = WLAN_STATUS_FAILURE;
break;
}
#endif
if (u4Status == WLAN_STATUS_SUCCESS) {
/* 6.1 reset interrupt status */
			HAL_READ_INTR_STATUS(prAdapter, 4, (PUINT_8)&u4WHISR);
if (HAL_IS_TX_DONE_INTR(u4WHISR)) {
HAL_READ_TX_RELEASED_COUNT(prAdapter, aucTxCount);
}
/* 6.2 reset TX Resource for normal operation */
nicTxResetResource(prAdapter);
/* 6.3 Enable interrupt */
nicEnableInterrupt(prAdapter);
/* 6.4 Override network address */
wlanUpdateNetworkAddress(prAdapter);
/* 6.5 indicate disconnection as default status */
kalIndicateStatusAndComplete(prAdapter->prGlueInfo,
WLAN_STATUS_MEDIA_DISCONNECT, NULL, 0);
}
RECLAIM_POWER_CONTROL_TO_PM(prAdapter, FALSE);
/* MGMT Initialization */
nicInitMGMT(prAdapter, NULL);
} while (FALSE);
if (u4Status != WLAN_STATUS_SUCCESS) {
return FALSE;
} else {
return TRUE;
}
}
/*----------------------------------------------------------------------------*/
/*!
* @brief This routine is used to set ACPI power mode to D3.
*
* @param prAdapter pointer to the Adapter handler
*
* @return TRUE (this routine always succeeds)
*/
/*----------------------------------------------------------------------------*/
BOOLEAN nicpmSetAcpiPowerD3(IN P_ADAPTER_T prAdapter)
{
UINT_32 i;
ASSERT(prAdapter);
	/* 1. MGMT - uninitialization */
nicUninitMGMT(prAdapter);
/* 2. Disable Interrupt */
nicDisableInterrupt(prAdapter);
/* 3. emit CMD_NIC_POWER_CTRL command packet */
wlanSendNicPowerCtrlCmd(prAdapter, 1);
/* 4. Clear Interrupt Status */
i = 0;
while (i < CFG_IST_LOOP_COUNT && nicProcessIST(prAdapter) != WLAN_STATUS_NOT_INDICATING) {
i++;
	}
/* 5. Remove pending TX */
nicTxRelease(prAdapter);
/* 5.1 clear pending Security / Management Frames */
kalClearSecurityFrames(prAdapter->prGlueInfo);
kalClearMgmtFrames(prAdapter->prGlueInfo);
/* 5.2 clear pending TX packet queued in glue layer */
kalFlushPendingTxPackets(prAdapter->prGlueInfo);
	/* 6. Set Ownership to F/W */
nicpmSetFWOwn(prAdapter, FALSE);
/* 7. Set variables */
prAdapter->rAcpiState = ACPI_STATE_D3;
return TRUE;
}
| gpl-2.0 |
adegroote/linux | sound/soc/codecs/cx20442.c | 209 | 10689 | /*
* cx20442.c -- CX20442 ALSA Soc Audio driver
*
* Copyright 2009 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
*
* Initially based on sound/soc/codecs/wm8400.c
* Copyright 2008, 2009 Wolfson Microelectronics PLC.
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include "cx20442.h"
struct cx20442_priv {
void *control_data;
struct regulator *por;
};
#define CX20442_PM 0x0
#define CX20442_TELIN 0
#define CX20442_TELOUT 1
#define CX20442_MIC 2
#define CX20442_SPKOUT 3
#define CX20442_AGC 4
static const struct snd_soc_dapm_widget cx20442_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("TELOUT"),
SND_SOC_DAPM_OUTPUT("SPKOUT"),
SND_SOC_DAPM_OUTPUT("AGCOUT"),
SND_SOC_DAPM_MIXER("SPKOUT Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("TELOUT Amp", CX20442_PM, CX20442_TELOUT, 0, NULL, 0),
SND_SOC_DAPM_PGA("SPKOUT Amp", CX20442_PM, CX20442_SPKOUT, 0, NULL, 0),
SND_SOC_DAPM_PGA("SPKOUT AGC", CX20442_PM, CX20442_AGC, 0, NULL, 0),
SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_MIXER("Input Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MICBIAS("TELIN Bias", CX20442_PM, CX20442_TELIN, 0),
SND_SOC_DAPM_MICBIAS("MIC Bias", CX20442_PM, CX20442_MIC, 0),
SND_SOC_DAPM_PGA("MIC AGC", CX20442_PM, CX20442_AGC, 0, NULL, 0),
SND_SOC_DAPM_INPUT("TELIN"),
SND_SOC_DAPM_INPUT("MIC"),
SND_SOC_DAPM_INPUT("AGCIN"),
};
static const struct snd_soc_dapm_route cx20442_audio_map[] = {
{"TELOUT", NULL, "TELOUT Amp"},
{"SPKOUT", NULL, "SPKOUT Mixer"},
{"SPKOUT Mixer", NULL, "SPKOUT Amp"},
{"TELOUT Amp", NULL, "DAC"},
{"SPKOUT Amp", NULL, "DAC"},
{"SPKOUT Mixer", NULL, "SPKOUT AGC"},
{"SPKOUT AGC", NULL, "AGCIN"},
{"AGCOUT", NULL, "MIC AGC"},
{"MIC AGC", NULL, "MIC"},
{"MIC Bias", NULL, "MIC"},
{"Input Mixer", NULL, "MIC Bias"},
{"TELIN Bias", NULL, "TELIN"},
{"Input Mixer", NULL, "TELIN Bias"},
{"ADC", NULL, "Input Mixer"},
};
static unsigned int cx20442_read_reg_cache(struct snd_soc_codec *codec,
unsigned int reg)
{
u8 *reg_cache = codec->reg_cache;
if (reg >= codec->driver->reg_cache_size)
return -EINVAL;
return reg_cache[reg];
}
enum v253_vls {
V253_VLS_NONE = 0,
V253_VLS_T,
V253_VLS_L,
V253_VLS_LT,
V253_VLS_S,
V253_VLS_ST,
V253_VLS_M,
V253_VLS_MST,
V253_VLS_S1,
V253_VLS_S1T,
V253_VLS_MS1T,
V253_VLS_M1,
V253_VLS_M1ST,
V253_VLS_M1S1T,
V253_VLS_H,
V253_VLS_HT,
V253_VLS_MS,
V253_VLS_MS1,
V253_VLS_M1S,
V253_VLS_M1S1,
V253_VLS_TEST,
};
static int cx20442_pm_to_v253_vls(u8 value)
{
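	/* Map the cached power register bits (AGC masked off) onto a V.253
	 * +VLS analogue source/destination selection. */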
switch (value & ~(1 << CX20442_AGC)) {
case 0:
return V253_VLS_T;
case (1 << CX20442_SPKOUT):
case (1 << CX20442_MIC):
case (1 << CX20442_SPKOUT) | (1 << CX20442_MIC):
return V253_VLS_M1S1;
case (1 << CX20442_TELOUT):
case (1 << CX20442_TELIN):
case (1 << CX20442_TELOUT) | (1 << CX20442_TELIN):
return V253_VLS_L;
case (1 << CX20442_TELOUT) | (1 << CX20442_MIC):
return V253_VLS_NONE;
}
return -EINVAL;
}
static int cx20442_pm_to_v253_vsp(u8 value)
{
switch (value & ~(1 << CX20442_AGC)) {
case (1 << CX20442_SPKOUT):
case (1 << CX20442_MIC):
case (1 << CX20442_SPKOUT) | (1 << CX20442_MIC):
return (bool)(value & (1 << CX20442_AGC));
}
return (value & (1 << CX20442_AGC)) ? -EINVAL : 0;
}
static int cx20442_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec);
u8 *reg_cache = codec->reg_cache;
int vls, vsp, old, len;
char buf[18];
if (reg >= codec->driver->reg_cache_size)
return -EINVAL;
/* hw_write and control_data pointers required for talking to the modem
* are expected to be set by the line discipline initialization code */
if (!codec->hw_write || !cx20442->control_data)
return -EIO;
old = reg_cache[reg];
reg_cache[reg] = value;
vls = cx20442_pm_to_v253_vls(value);
if (vls < 0)
return vls;
vsp = cx20442_pm_to_v253_vsp(value);
if (vsp < 0)
return vsp;
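/* Build the shortest AT command covering what actually changed: +vsp only, +vls only, or both */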
if ((vls == V253_VLS_T) ||
(vls == cx20442_pm_to_v253_vls(old))) {
if (vsp == cx20442_pm_to_v253_vsp(old))
return 0;
len = snprintf(buf, ARRAY_SIZE(buf), "at+vsp=%d\r", vsp);
} else if (vsp == cx20442_pm_to_v253_vsp(old))
len = snprintf(buf, ARRAY_SIZE(buf), "at+vls=%d\r", vls);
else
len = snprintf(buf, ARRAY_SIZE(buf),
"at+vls=%d;+vsp=%d\r", vls, vsp);
if (unlikely(len > (ARRAY_SIZE(buf) - 1)))
return -ENOMEM;
dev_dbg(codec->dev, "%s: %s\n", __func__, buf);
if (codec->hw_write(cx20442->control_data, buf, len) != len)
return -EIO;
return 0;
}
/*
* Line discipline related code
*
* Any of the callback functions below can be used in two ways:
* 1) registered by a machine driver as one of the line discipline operations,
* 2) called from a machine driver's own line discipline callback when extra
* machine-specific code must be run as well.
*/
/* Modem init: echo off, digital speaker off, quiet off, voice mode */
static const char *v253_init = "ate0m0q0+fclass=8\r";
/* Line discipline .open() */
static int v253_open(struct tty_struct *tty)
{
int ret, len = strlen(v253_init);
/* Doesn't make sense without write callback */
if (!tty->ops->write)
return -EINVAL;
/* Won't work if no codec pointer has been passed by a card driver */
if (!tty->disc_data)
return -ENODEV;
if (tty->ops->write(tty, v253_init, len) != len) {
ret = -EIO;
goto err;
}
/* Actual setup will be performed after the modem responds. */
return 0;
err:
tty->disc_data = NULL;
return ret;
}
/* Line discipline .close() */
static void v253_close(struct tty_struct *tty)
{
struct snd_soc_codec *codec = tty->disc_data;
struct cx20442_priv *cx20442;
tty->disc_data = NULL;
if (!codec)
return;
cx20442 = snd_soc_codec_get_drvdata(codec);
/* Prevent the codec driver from further accessing the modem */
codec->hw_write = NULL;
cx20442->control_data = NULL;
codec->component.card->pop_time = 0;
}
/* Line discipline .hangup() */
static int v253_hangup(struct tty_struct *tty)
{
v253_close(tty);
return 0;
}
/* Line discipline .receive_buf() */
static void v253_receive(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct snd_soc_codec *codec = tty->disc_data;
struct cx20442_priv *cx20442;
if (!codec)
return;
cx20442 = snd_soc_codec_get_drvdata(codec);
if (!cx20442->control_data) {
/* First modem response, complete setup procedure */
/* Set up codec driver access to modem controls */
cx20442->control_data = tty;
codec->hw_write = (hw_write_t)tty->ops->write;
codec->component.card->pop_time = 1;
}
}
/* Line discipline .write_wakeup() */
static void v253_wakeup(struct tty_struct *tty)
{
}
struct tty_ldisc_ops v253_ops = {
.magic = TTY_LDISC_MAGIC,
.name = "cx20442",
.owner = THIS_MODULE,
.open = v253_open,
.close = v253_close,
.hangup = v253_hangup,
.receive_buf = v253_receive,
.write_wakeup = v253_wakeup,
};
EXPORT_SYMBOL_GPL(v253_ops);
/*
* Codec DAI
*/
static struct snd_soc_dai_driver cx20442_dai = {
.name = "cx20442-voice",
.playback = {
.stream_name = "Playback",
.channels_min = 1,
.channels_max = 1,
.rates = SNDRV_PCM_RATE_8000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 1,
.rates = SNDRV_PCM_RATE_8000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
};
static int cx20442_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec);
int err = 0;
switch (level) {
case SND_SOC_BIAS_PREPARE:
if (codec->dapm.bias_level != SND_SOC_BIAS_STANDBY)
break;
if (IS_ERR(cx20442->por))
err = PTR_ERR(cx20442->por);
else
err = regulator_enable(cx20442->por);
break;
case SND_SOC_BIAS_STANDBY:
if (codec->dapm.bias_level != SND_SOC_BIAS_PREPARE)
break;
if (IS_ERR(cx20442->por))
err = PTR_ERR(cx20442->por);
else
err = regulator_disable(cx20442->por);
break;
default:
break;
}
if (!err)
codec->dapm.bias_level = level;
return err;
}
static int cx20442_codec_probe(struct snd_soc_codec *codec)
{
struct cx20442_priv *cx20442;
cx20442 = kzalloc(sizeof(struct cx20442_priv), GFP_KERNEL);
if (cx20442 == NULL)
return -ENOMEM;
cx20442->por = regulator_get(codec->dev, "POR");
if (IS_ERR(cx20442->por))
dev_warn(codec->dev, "failed to get the regulator\n");
cx20442->control_data = NULL;
snd_soc_codec_set_drvdata(codec, cx20442);
codec->hw_write = NULL;
codec->component.card->pop_time = 0;
return 0;
}
/* power down chip */
static int cx20442_codec_remove(struct snd_soc_codec *codec)
{
struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec);
if (cx20442->control_data) {
struct tty_struct *tty = cx20442->control_data;
tty_hangup(tty);
}
if (!IS_ERR(cx20442->por)) {
/* should be already in STANDBY, hence disabled */
regulator_put(cx20442->por);
}
snd_soc_codec_set_drvdata(codec, NULL);
kfree(cx20442);
return 0;
}
static const u8 cx20442_reg;
static struct snd_soc_codec_driver cx20442_codec_dev = {
.probe = cx20442_codec_probe,
.remove = cx20442_codec_remove,
.set_bias_level = cx20442_set_bias_level,
.reg_cache_default = &cx20442_reg,
.reg_cache_size = 1,
.reg_word_size = sizeof(u8),
.read = cx20442_read_reg_cache,
.write = cx20442_write,
.dapm_widgets = cx20442_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(cx20442_dapm_widgets),
.dapm_routes = cx20442_audio_map,
.num_dapm_routes = ARRAY_SIZE(cx20442_audio_map),
};
static int cx20442_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev,
&cx20442_codec_dev, &cx20442_dai, 1);
}
static int __exit cx20442_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
}
static struct platform_driver cx20442_platform_driver = {
.driver = {
.name = "cx20442-codec",
},
.probe = cx20442_platform_probe,
.remove = __exit_p(cx20442_platform_remove),
};
module_platform_driver(cx20442_platform_driver);
MODULE_DESCRIPTION("ASoC CX20442-11 voice modem codec driver");
MODULE_AUTHOR("Janusz Krzysztofik");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cx20442-codec");
| gpl-2.0 |
rhtu/linux | drivers/gpu/drm/sti/sti_gdp.c | 209 | 17340 | /*
* Copyright (C) STMicroelectronics SA 2014
* Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
* Fabien Dessenne <fabien.dessenne@st.com>
* for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "sti_compositor.h"
#include "sti_gdp.h"
#include "sti_plane.h"
#include "sti_vtg.h"
#define ALPHASWITCH BIT(6)
#define ENA_COLOR_FILL BIT(8)
#define BIGNOTLITTLE BIT(23)
#define WAIT_NEXT_VSYNC BIT(31)
/* GDP color formats */
#define GDP_RGB565 0x00
#define GDP_RGB888 0x01
#define GDP_RGB888_32 0x02
#define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB8565 0x04
#define GDP_ARGB8888 0x05
#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB1555 0x06
#define GDP_ARGB4444 0x07
#define GDP_CLUT8 0x0B
#define GDP_YCBR888 0x10
#define GDP_YCBR422R 0x12
#define GDP_AYCBR8888 0x15
#define GAM_GDP_CTL_OFFSET 0x00
#define GAM_GDP_AGC_OFFSET 0x04
#define GAM_GDP_VPO_OFFSET 0x0C
#define GAM_GDP_VPS_OFFSET 0x10
#define GAM_GDP_PML_OFFSET 0x14
#define GAM_GDP_PMP_OFFSET 0x18
#define GAM_GDP_SIZE_OFFSET 0x1C
#define GAM_GDP_NVN_OFFSET 0x24
#define GAM_GDP_KEY1_OFFSET 0x28
#define GAM_GDP_KEY2_OFFSET 0x2C
#define GAM_GDP_PPT_OFFSET 0x34
#define GAM_GDP_CML_OFFSET 0x3C
#define GAM_GDP_MST_OFFSET 0x68
#define GAM_GDP_ALPHARANGE_255 BIT(5)
#define GAM_GDP_AGC_FULL_RANGE 0x00808080
#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
#define GAM_GDP_SIZE_MAX 0x7FF
#define GDP_NODE_NB_BANK 2
#define GDP_NODE_PER_FIELD 2
struct sti_gdp_node {
u32 gam_gdp_ctl;
u32 gam_gdp_agc;
u32 reserved1;
u32 gam_gdp_vpo;
u32 gam_gdp_vps;
u32 gam_gdp_pml;
u32 gam_gdp_pmp;
u32 gam_gdp_size;
u32 reserved2;
u32 gam_gdp_nvn;
u32 gam_gdp_key1;
u32 gam_gdp_key2;
u32 reserved3;
u32 gam_gdp_ppt;
u32 reserved4;
u32 gam_gdp_cml;
};
struct sti_gdp_node_list {
struct sti_gdp_node *top_field;
dma_addr_t top_field_paddr;
struct sti_gdp_node *btm_field;
dma_addr_t btm_field_paddr;
};
/**
* STI GDP structure
*
* @sti_plane: sti_plane structure
* @dev: driver device
* @regs: gdp registers
* @clk_pix: pixel clock for the current gdp
* @clk_main_parent: gdp parent clock if main path used
* @clk_aux_parent: gdp parent clock if aux path used
* @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
* @is_curr_top: true if the current node processed is the top field
* @node_list: array of node list
*/
struct sti_gdp {
struct sti_plane plane;
struct device *dev;
void __iomem *regs;
struct clk *clk_pix;
struct clk *clk_main_parent;
struct clk *clk_aux_parent;
struct notifier_block vtg_field_nb;
bool is_curr_top;
struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
};
#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
static const uint32_t gdp_supported_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_AYUV,
DRM_FORMAT_YUV444,
DRM_FORMAT_VYUY,
DRM_FORMAT_C8,
};
static int sti_gdp_fourcc2format(int fourcc)
{
switch (fourcc) {
case DRM_FORMAT_XRGB8888:
return GDP_RGB888_32;
case DRM_FORMAT_XBGR8888:
return GDP_XBGR8888;
case DRM_FORMAT_ARGB8888:
return GDP_ARGB8888;
case DRM_FORMAT_ABGR8888:
return GDP_ABGR8888;
case DRM_FORMAT_ARGB4444:
return GDP_ARGB4444;
case DRM_FORMAT_ARGB1555:
return GDP_ARGB1555;
case DRM_FORMAT_RGB565:
return GDP_RGB565;
case DRM_FORMAT_RGB888:
return GDP_RGB888;
case DRM_FORMAT_AYUV:
return GDP_AYCBR8888;
case DRM_FORMAT_YUV444:
return GDP_YCBR888;
case DRM_FORMAT_VYUY:
return GDP_YCBR422R;
case DRM_FORMAT_C8:
return GDP_CLUT8;
}
return -1;
}
static int sti_gdp_get_alpharange(int format)
{
switch (format) {
case GDP_ARGB8565:
case GDP_ARGB8888:
case GDP_AYCBR8888:
case GDP_ABGR8888:
return GAM_GDP_ALPHARANGE_255;
}
return 0;
}
/**
* sti_gdp_get_free_nodes
* @gdp: gdp pointer
*
* Look for a GDP node list that is not currently read by the HW.
*
* RETURNS:
* Pointer to the free GDP node list
*/
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
{
int hw_nvn;
unsigned int i;
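/* NVN identifies the node list currently being read by the hardware */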
hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
if (!hw_nvn)
goto end;
for (i = 0; i < GDP_NODE_NB_BANK; i++)
if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
(hw_nvn != gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i];
/* in hazardous cases restart with the first node */
DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
sti_plane_to_str(&gdp->plane), hw_nvn);
end:
return &gdp->node_list[0];
}
/**
* sti_gdp_get_current_nodes
* @gdp: gdp pointer
*
* Look for GDP nodes that are currently read by the HW.
*
* RETURNS:
* Pointer to the current GDP node list
*/
static
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
{
int hw_nvn;
unsigned int i;
hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
if (!hw_nvn)
goto end;
for (i = 0; i < GDP_NODE_NB_BANK; i++)
if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
(hw_nvn == gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i];
end:
DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
hw_nvn, sti_plane_to_str(&gdp->plane));
return NULL;
}
/**
* sti_gdp_disable
* @gdp: gdp pointer
*
* Disable a GDP.
*/
static void sti_gdp_disable(struct sti_gdp *gdp)
{
struct drm_plane *drm_plane = &gdp->plane.drm_plane;
struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
unsigned int i;
DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
/* Set the nodes as 'to be ignored on mixer' */
for (i = 0; i < GDP_NODE_NB_BANK; i++) {
gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
}
if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
if (gdp->clk_pix)
clk_disable_unprepare(gdp->clk_pix);
gdp->plane.status = STI_PLANE_DISABLED;
}
/**
* sti_gdp_field_cb
* @nb: notifier block
* @event: event message
* @data: private data
*
* Handle VTG top field and bottom field event.
*
* RETURNS:
* 0 on success.
*/
int sti_gdp_field_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
if (gdp->plane.status == STI_PLANE_FLUSHING) {
/* disabling must be synchronized with the vsync event */
DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
sti_plane_to_str(&gdp->plane));
sti_gdp_disable(gdp);
}
switch (event) {
case VTG_TOP_FIELD_EVENT:
gdp->is_curr_top = true;
break;
case VTG_BOTTOM_FIELD_EVENT:
gdp->is_curr_top = false;
break;
default:
DRM_ERROR("unsupported event: %lu\n", event);
break;
}
return 0;
}
static void sti_gdp_init(struct sti_gdp *gdp)
{
struct device_node *np = gdp->dev->of_node;
dma_addr_t dma_addr;
void *base;
unsigned int i, size;
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
base = dma_alloc_writecombine(gdp->dev,
size, &dma_addr, GFP_KERNEL | GFP_DMA);
if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n");
return;
}
memset(base, 0, size);
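/* Each bank holds a top field node followed by a bottom field node;
* every node address must stay 16-byte aligned */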
for (i = 0; i < GDP_NODE_NB_BANK; i++) {
if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n");
return;
}
gdp->node_list[i].top_field = base;
gdp->node_list[i].top_field_paddr = dma_addr;
DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node);
dma_addr += sizeof(struct sti_gdp_node);
if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n");
return;
}
gdp->node_list[i].btm_field = base;
gdp->node_list[i].btm_field_paddr = dma_addr;
DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node);
dma_addr += sizeof(struct sti_gdp_node);
}
if (of_device_is_compatible(np, "st,stih407-compositor")) {
/* Each GDP of the STiH407 chip has its own pixel clock */
char *clk_name;
switch (gdp->plane.desc) {
case STI_GDP_0:
clk_name = "pix_gdp1";
break;
case STI_GDP_1:
clk_name = "pix_gdp2";
break;
case STI_GDP_2:
clk_name = "pix_gdp3";
break;
case STI_GDP_3:
clk_name = "pix_gdp4";
break;
default:
DRM_ERROR("GDP id not recognized\n");
return;
}
gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
if (IS_ERR(gdp->clk_pix))
DRM_ERROR("Cannot get %s clock\n", clk_name);
gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
if (IS_ERR(gdp->clk_main_parent))
DRM_ERROR("Cannot get main_parent clock\n");
gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
if (IS_ERR(gdp->clk_aux_parent))
DRM_ERROR("Cannot get aux_parent clock\n");
}
}
static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct drm_plane_state *state = drm_plane->state;
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_gdp *gdp = to_sti_gdp(plane);
struct drm_crtc *crtc = state->crtc;
struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
struct drm_framebuffer *fb = state->fb;
bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
struct sti_mixer *mixer;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
struct drm_gem_cma_object *cma_obj;
struct sti_gdp_node_list *list;
struct sti_gdp_node_list *curr_list;
struct sti_gdp_node *top_field, *btm_field;
u32 dma_updated_top;
u32 dma_updated_btm;
int format;
unsigned int depth, bpp;
u32 ydo, xdo, yds, xds;
int res;
/* Handle the case where crtc is NULL (plane disabled) */
if (!crtc)
return;
mixer = to_sti_mixer(crtc);
mode = &crtc->mode;
dst_x = state->crtc_x;
dst_y = state->crtc_y;
dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
/* src coordinates are in 16.16 fixed-point format */
src_x = state->src_x >> 16;
src_y = state->src_y >> 16;
src_w = state->src_w >> 16;
src_h = state->src_h >> 16;
DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer),
drm_plane->base.id, sti_plane_to_str(plane));
DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
sti_plane_to_str(plane),
dst_w, dst_h, dst_x, dst_y,
src_w, src_h, src_x, src_y);
list = sti_gdp_get_free_nodes(gdp);
top_field = list->top_field;
btm_field = list->btm_field;
dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
sti_plane_to_str(plane), top_field, btm_field);
/* build the top field */
top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
format = sti_gdp_fourcc2format(fb->pixel_format);
if (format == -1) {
DRM_ERROR("Format not supported by GDP %.4s\n",
(char *)&fb->pixel_format);
return;
}
top_field->gam_gdp_ctl |= format;
top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
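/* Clear the 'ignore on mixer' flag set by sti_gdp_disable() so the node is processed again */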
top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
if (!cma_obj) {
DRM_ERROR("Can't get CMA GEM object for fb\n");
return;
}
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->pixel_format,
(unsigned long)cma_obj->paddr);
/* pixel memory location */
drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
top_field->gam_gdp_pml += src_x * (bpp >> 3);
top_field->gam_gdp_pml += src_y * fb->pitches[0];
/* input parameters */
top_field->gam_gdp_pmp = fb->pitches[0];
top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);
/* output parameters */
ydo = sti_vtg_get_line_number(*mode, dst_y);
yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
xdo = sti_vtg_get_pixel_number(*mode, dst_x);
xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
top_field->gam_gdp_vpo = (ydo << 16) | xdo;
top_field->gam_gdp_vps = (yds << 16) | xds;
/* Same content and chained together */
memcpy(btm_field, top_field, sizeof(*btm_field));
top_field->gam_gdp_nvn = list->btm_field_paddr;
btm_field->gam_gdp_nvn = list->top_field_paddr;
/* Interlaced mode */
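/* The bottom field starts one line (one pitch) further into the framebuffer */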
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
fb->pitches[0];
if (first_prepare) {
/* Register gdp callback */
if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux,
&gdp->vtg_field_nb, mixer->id)) {
DRM_ERROR("Cannot register VTG notifier\n");
return;
}
/* Set and enable gdp clock */
if (gdp->clk_pix) {
struct clk *clkp;
int rate = mode->clock * 1000;
/* Depending on which mixer is used, the gdp pixel clock
* takes a different parent clock. */
if (mixer->id == STI_MIXER_MAIN)
clkp = gdp->clk_main_parent;
else
clkp = gdp->clk_aux_parent;
if (clkp)
clk_set_parent(gdp->clk_pix, clkp);
res = clk_set_rate(gdp->clk_pix, rate);
if (res < 0) {
DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
rate);
return;
}
if (clk_prepare_enable(gdp->clk_pix)) {
DRM_ERROR("Failed to prepare/enable gdp\n");
return;
}
}
}
/* Update the NVN field of the 'right' field of the current GDP node
* (being used by the HW) with the address of the updated ('free') top
* field GDP node.
* - In interlaced mode the 'right' field is the bottom field as we
* update frames starting from their top field
* - In progressive mode, we update both bottom and top fields which
* are equal nodes.
* At the next VSYNC, the updated node list will be used by the HW.
*/
curr_list = sti_gdp_get_current_nodes(gdp);
dma_updated_top = list->top_field_paddr;
dma_updated_btm = list->btm_field_paddr;
dev_dbg(gdp->dev, "Current NVN:0x%X\n",
readl(gdp->regs + GAM_GDP_NVN_OFFSET));
dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
(unsigned long)cma_obj->paddr,
readl(gdp->regs + GAM_GDP_PML_OFFSET));
if (!curr_list) {
/* On the first update, or when the node is invalid, write
* directly to the hw register */
DRM_DEBUG_DRIVER("%s first update (or invalid node)",
sti_plane_to_str(plane));
writel(gdp->is_curr_top ?
dma_updated_btm : dma_updated_top,
gdp->regs + GAM_GDP_NVN_OFFSET);
goto end;
}
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (gdp->is_curr_top) {
/* Do not update in the middle of the frame; postpone
* the update until the bottom field has been
* displayed */
curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
} else {
/* Direct update to avoid one frame delay */
writel(dma_updated_top,
gdp->regs + GAM_GDP_NVN_OFFSET);
}
} else {
/* Direct update for progressive to avoid one frame delay */
writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
}
end:
plane->status = STI_PLANE_UPDATED;
}
static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
if (!drm_plane->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
drm_plane->base.id);
return;
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
}
static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
.atomic_update = sti_gdp_atomic_update,
.atomic_disable = sti_gdp_atomic_disable,
};
struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
struct device *dev, int desc,
void __iomem *baseaddr,
unsigned int possible_crtcs,
enum drm_plane_type type)
{
struct sti_gdp *gdp;
int res;
gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
if (!gdp) {
DRM_ERROR("Failed to allocate memory for GDP\n");
return NULL;
}
gdp->dev = dev;
gdp->regs = baseaddr;
gdp->plane.desc = desc;
gdp->plane.status = STI_PLANE_DISABLED;
gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
sti_gdp_init(gdp);
res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
possible_crtcs,
&sti_plane_helpers_funcs,
gdp_supported_formats,
ARRAY_SIZE(gdp_supported_formats),
type);
if (res) {
DRM_ERROR("Failed to initialize universal plane\n");
goto err;
}
drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);
sti_plane_init_property(&gdp->plane, type);
return &gdp->plane.drm_plane;
err:
devm_kfree(dev, gdp);
return NULL;
}
| gpl-2.0 |
KaSt/Kappa | drivers/scsi/lpfc/lpfc_mbox.c | 465 | 65962 | /*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_compat.h"
/**
* lpfc_dump_static_vport - Dump HBA's static vport information.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @offset: offset for dumping vport info.
*
* The dump mailbox command provides a method for the device driver to obtain
* various types of information from the HBA device.
*
* This routine prepares the mailbox command for dumping list of static
* vports to be created.
**/
int
lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
uint16_t offset)
{
MAILBOX_t *mb;
struct lpfc_dmabuf *mp;
mb = &pmb->u.mb;
/* Setup to dump vport info region */
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.entry_index = offset;
mb->un.varDmp.region_id = DMP_REGION_VPORT;
mb->mbxOwner = OWN_HOST;
/* For SLI3 HBAs data is embedded in mailbox */
if (phba->sli_rev != LPFC_SLI_REV4) {
mb->un.varDmp.cv = 1;
mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
return 0;
}
/* For SLI4 HBAs driver need to allocate memory */
mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (mp)
mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
if (!mp || !mp->virt) {
kfree(mp);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"2605 lpfc_dump_static_vport: memory"
" allocation failed\n");
return 1;
}
memset(mp->virt, 0, LPFC_BPL_SIZE);
INIT_LIST_HEAD(&mp->list);
/* save address for completion */
pmb->context2 = (uint8_t *) mp;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
return 0;
}
/**
* lpfc_down_link - Bring down the HBA's link.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* This routine prepares a mailbox command to bring down HBA link.
**/
void
lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb;
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb = &pmb->u.mb;
mb->mbxCommand = MBX_DOWN_LINK;
mb->mbxOwner = OWN_HOST;
}
/**
* lpfc_dump_mem - Prepare a mailbox command for reading a region.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @offset: offset into the region.
* @region_id: config region id.
*
* The dump mailbox command provides a method for the device driver to obtain
* various types of information from the HBA device.
*
* This routine prepares the mailbox command for dumping HBA's config region.
**/
void
lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
uint16_t region_id)
{
MAILBOX_t *mb;
void *ctx;
mb = &pmb->u.mb;
ctx = pmb->context2;
/* Setup to dump VPD region */
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->un.varDmp.cv = 1;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.entry_index = offset;
mb->un.varDmp.region_id = region_id;
mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
pmb->context2 = ctx;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_dump_wakeup_param - Prepare mailbox command for retrieving wakeup params
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* This function creates a dump memory mailbox command to dump wake up
* parameters.
*/
void
lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb;
void *ctx;
mb = &pmb->u.mb;
/* Save context so that we can restore after memset */
ctx = pmb->context2;
/* Setup to dump VPD region */
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->mbxOwner = OWN_HOST;
mb->un.varDmp.cv = 1;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.entry_index = 0;
mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
mb->un.varDmp.co = 0;
mb->un.varDmp.resp_offset = 0;
pmb->context2 = ctx;
return;
}
/**
* lpfc_read_nv - Prepare a mailbox command for reading HBA's NVRAM param
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The read NVRAM mailbox command returns the HBA's non-volatile parameters
* that are used as defaults when the Fibre Channel link is brought on-line.
*
* This routine prepares the mailbox command for reading information stored
* in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN.
**/
void
lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_READ_NV;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_config_async - Prepare a mailbox command for enabling HBA async event
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @ring: ring number for the asynchronous event to be configured.
*
* The asynchronous event enable mailbox command is used to enable the
* asynchronous event posting via the ASYNC_STATUS_CN IOCB response and
* specifies the default ring to which events are posted.
*
* This routine prepares the mailbox command for enabling HBA asynchronous
* event support on an IOCB ring.
**/
void
lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
uint32_t ring)
{
MAILBOX_t *mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
mb->un.varCfgAsyncEvent.ring = ring;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_heart_beat - Prepare a mailbox command for heart beat
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The heart beat mailbox command is used to detect an unresponsive HBA, which
* is defined as any device where no error attention is sent and neither the
* mailbox nor the rings are being processed.
*
* This routine prepares the mailbox command for issuing a heart beat in the
* form of mailbox command to the HBA. The timely completion of the heart
* beat mailbox command indicates the health of the HBA.
**/
void
lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_HEARTBEAT;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_read_la - Prepare a mailbox command for reading HBA link attention
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @mp: DMA buffer memory for reading the link attention information into.
*
* The read link attention mailbox command is issued to read the Link Event
* Attention information indicated by the HBA port when the Link Event bit
* of the Host Attention (HSTATT) register is set to 1. A Link Event
* Attention occurs based on an exception detected at the Fibre Channel link
* interface.
*
* This routine prepares the mailbox command for reading HBA link attention
* information. A DMA memory has been set aside and address passed to the
* HBA through @mp for the HBA to DMA link attention information into the
* memory as part of the execution of the mailbox command.
*
* Return codes
* 0 - Success (currently always return 0)
**/
int
lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
{
MAILBOX_t *mb;
struct lpfc_sli *psli;
psli = &phba->sli;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
INIT_LIST_HEAD(&mp->list);
mb->mbxCommand = MBX_READ_LA64;
mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
mb->un.varReadLA.un.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varReadLA.un.lilpBde64.addrLow = putPaddrLow(mp->phys);
/* Save address for later completion and set the owner to host so that
* the FW knows this mailbox is available for processing.
*/
pmb->context1 = (uint8_t *) mp;
mb->mbxOwner = OWN_HOST;
return (0);
}
/**
* lpfc_clear_la - Prepare a mailbox command for clearing HBA link attention
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The clear link attention mailbox command is issued to clear the link event
* attention condition indicated by the Link Event bit of the Host Attention
* (HSTATT) register. The link event attention condition is cleared only if
* the event tag specified matches that of the current link event counter.
* The current event tag is read using the read link attention event mailbox
* command.
*
* This routine prepares the mailbox command for clearing HBA link attention
* information.
**/
void
lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varClearLA.eventTag = phba->fc_eventTag;
mb->mbxCommand = MBX_CLEAR_LA;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_config_link - Prepare a mailbox command for configuring link on a HBA
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The configure link mailbox command is used before the initialize link
* mailbox command to override default value and to configure link-oriented
* parameters such as DID address and various timers. Typically, this
* command would be used after an F_Port login to set the returned DID address
* and the fabric timeout values. This command is not valid before a configure
* port command has configured the HBA port.
*
* This routine prepares the mailbox command for configuring link on a HBA.
**/
void
lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
struct lpfc_vport *vport = phba->pport;
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
/* NEW_FEATURE
* SLI-2, Coalescing Response Feature.
*/
if (phba->cfg_cr_delay) {
mb->un.varCfgLnk.cr = 1;
mb->un.varCfgLnk.ci = 1;
mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
}
mb->un.varCfgLnk.myId = vport->fc_myDID;
mb->un.varCfgLnk.edtov = phba->fc_edtov;
mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
mb->un.varCfgLnk.ratov = phba->fc_ratov;
mb->un.varCfgLnk.rttov = phba->fc_rttov;
mb->un.varCfgLnk.altov = phba->fc_altov;
mb->un.varCfgLnk.crtov = phba->fc_crtov;
mb->un.varCfgLnk.citov = phba->fc_citov;
if (phba->cfg_ack0)
mb->un.varCfgLnk.ack0_enable = 1;
mb->mbxCommand = MBX_CONFIG_LINK;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_config_msi - Prepare a mailbox command for configuring msi-x
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The configure MSI-X mailbox command is used to configure the HBA's SLI-3
* MSI-X multi-message interrupt vector association to interrupt attention
* conditions.
*
* Return codes
* 0 - Success
* -EINVAL - Failure
**/
int
lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
uint32_t attentionConditions[2];
/* Sanity check */
if (phba->cfg_use_msi != 2) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0475 Not configured for supporting MSI-X "
"cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
return -EINVAL;
}
if (phba->sli_rev < 3) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0476 HBA not supporting SLI-3 or later "
"SLI Revision: 0x%x\n", phba->sli_rev);
return -EINVAL;
}
/* Clear mailbox command fields */
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
/*
* SLI-3, Message Signaled Interrupt Feature.
*/
/* Multi-message attention configuration */
attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
HA_LATT | HA_MBATT);
attentionConditions[1] = 0;
mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];
/*
* Set up message number to HA bit association
*/
#ifdef __BIG_ENDIAN_BITFIELD
/* RA0 (FCP Ring) */
mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
/* RA1 (Other Protocol Extra Ring) */
mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
#else /* __LITTLE_ENDIAN_BITFIELD */
/* RA0 (FCP Ring) */
mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
/* RA1 (Other Protocol Extra Ring) */
mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
#endif
/* Multi-message interrupt autoclear configuration*/
mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];
/* For now, HBA autoclear does not work reliably, disable it */
mb->un.varCfgMSI.autoClearHA[0] = 0;
mb->un.varCfgMSI.autoClearHA[1] = 0;
/* Set command and owner bit */
mb->mbxCommand = MBX_CONFIG_MSI;
mb->mbxOwner = OWN_HOST;
return 0;
}
/**
* lpfc_init_link - Prepare a mailbox command for initialize link on a HBA
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @topology: the link topology for the link to be initialized to.
* @linkspeed: the link speed for the link to be initialized to.
*
* The initialize link mailbox command is used to initialize the Fibre
* Channel link. This command must follow a configure port command that
* establishes the mode of operation.
*
* This routine prepares the mailbox command for initializing link on a HBA
* with the specified link topology and speed.
**/
void
lpfc_init_link(struct lpfc_hba * phba,
LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
{
lpfc_vpd_t *vpd;
struct lpfc_sli *psli;
MAILBOX_t *mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
psli = &phba->sli;
switch (topology) {
case FLAGS_TOPOLOGY_MODE_LOOP_PT:
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
break;
case FLAGS_TOPOLOGY_MODE_PT_PT:
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
break;
case FLAGS_TOPOLOGY_MODE_LOOP:
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
break;
case FLAGS_TOPOLOGY_MODE_PT_LOOP:
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
break;
case FLAGS_LOCAL_LB:
mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
break;
}
/* Enable asynchronous ABTS responses from firmware */
mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
/* NEW_FEATURE
* Setting up the link speed
*/
vpd = &phba->vpd;
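/* Only feature level 0x02 and higher firmware accepts a forced link speed */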
if (vpd->rev.feaLevelHigh >= 0x02){
switch(linkspeed){
case LINK_SPEED_1G:
case LINK_SPEED_2G:
case LINK_SPEED_4G:
case LINK_SPEED_8G:
mb->un.varInitLnk.link_flags |=
FLAGS_LINK_SPEED;
mb->un.varInitLnk.link_speed = linkspeed;
break;
case LINK_SPEED_AUTO:
default:
mb->un.varInitLnk.link_speed =
LINK_SPEED_AUTO;
break;
}
}
else
mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
mb->mbxOwner = OWN_HOST;
mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
return;
}
/**
* lpfc_read_sparam - Prepare a mailbox command for reading HBA parameters
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @vpi: virtual N_Port identifier.
*
* The read service parameter mailbox command is used to read the HBA port
* service parameters. The service parameters are read into the buffer
* specified directly by a BDE in the mailbox command. These service
* parameters may then be used to build the payload of an N_Port/F_Port
* login request and reply (LOGI/ACC).
*
* This routine prepares the mailbox command for reading HBA port service
* parameters. The DMA memory is allocated in this function and the addresses
* are populated into the mailbox command for the HBA to DMA the service
* parameters into.
*
* Return codes
* 0 - Success
* 1 - DMA memory allocation failed
**/
int
lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
struct lpfc_dmabuf *mp;
MAILBOX_t *mb;
struct lpfc_sli *psli;
psli = &phba->sli;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxOwner = OWN_HOST;
/* Get a buffer to hold the HBAs Service Parameters */
mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
if (mp)
mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
if (!mp || !mp->virt) {
kfree(mp);
mb->mbxCommand = MBX_READ_SPARM64;
/* READ_SPARAM: no buffers */
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0301 READ_SPARAM: no buffers\n");
return (1);
}
INIT_LIST_HEAD(&mp->list);
mb->mbxCommand = MBX_READ_SPARM64;
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
/* save address for completion */
pmb->context1 = mp;
return (0);
}
/**
* lpfc_unreg_did - Prepare a mailbox command for unregistering DID
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
* @did: remote port identifier.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The unregister DID mailbox command is used to unregister an N_Port/F_Port
* login for an unknown RPI by specifying the DID of a remote port. This
* command frees an RPI context in the HBA port. This has the effect of
* performing an implicit N_Port/F_Port logout.
*
* This routine prepares the mailbox command for unregistering a remote
* N_Port/F_Port (DID) login.
**/
void
lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregDID.did = did;
if (vpi != 0xffff)
vpi += phba->vpi_base;
mb->un.varUnregDID.vpi = vpi;
mb->mbxCommand = MBX_UNREG_D_ID;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_read_config - Prepare a mailbox command for reading HBA configuration
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The read configuration mailbox command is used to read the HBA port
* configuration parameters. This mailbox command provides a method for
* seeing any parameters that may have changed via various configuration
* mailbox commands.
*
* This routine prepares the mailbox command for reading out HBA configuration
* parameters.
**/
void
lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_READ_CONFIG;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_read_lnk_stat - Prepare a mailbox command for reading HBA link stats
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The read link status mailbox command is used to read the link status from
* the HBA. Link status includes all link-related error counters. These
* counters are maintained by the HBA and originated in the link hardware
* unit. Note that all of these counters wrap.
*
* This routine prepares the mailbox command for reading out HBA link status.
**/
void
lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->mbxCommand = MBX_READ_LNK_STAT;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_reg_rpi - Prepare a mailbox command for registering remote login
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
* @did: remote port identifier.
* @param: pointer to memory holding the server parameters.
* @pmb: pointer to the driver internal queue element for mailbox command.
* @flag: action flag to be passed back for the complete function.
*
* The registration login mailbox command is used to register an N_Port or
* F_Port login. This registration allows the HBA to cache the remote N_Port
* service parameters internally and thereby make the appropriate FC-2
* decisions. The remote port service parameters are handed off by the driver
* to the HBA using a descriptor entry that directly identifies a buffer in
* host memory. In exchange, the HBA returns an RPI identifier.
*
* This routine prepares the mailbox command for registering remote port login.
* The function allocates DMA buffer for passing the service parameters to the
* HBA with the mailbox command.
*
* Return codes
* 0 - Success
* 1 - DMA memory allocation failed
**/
int
lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
{
MAILBOX_t *mb = &pmb->u.mb;
uint8_t *sparam;
struct lpfc_dmabuf *mp;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegLogin.rpi = 0;
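/* SLI-4 ports require the driver to allocate the RPI before issuing REG_LOGIN */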
if (phba->sli_rev == LPFC_SLI_REV4) {
mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
return 1;
}
mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
mb->un.varRegLogin.did = did;
mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
mb->mbxOwner = OWN_HOST;
/* Get a buffer to hold NPorts Service Parameters */
mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
if (mp)
mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
if (!mp || !mp->virt) {
kfree(mp);
mb->mbxCommand = MBX_REG_LOGIN64;
/* REG_LOGIN: no buffers */
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
"flag x%x\n", vpi, did, flag);
return (1);
}
INIT_LIST_HEAD(&mp->list);
sparam = mp->virt;
/* Copy the service parameters into a new buffer */
memcpy(sparam, param, sizeof (struct serv_parm));
/* save address for completion */
pmb->context1 = (uint8_t *) mp;
mb->mbxCommand = MBX_REG_LOGIN64;
mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
return (0);
}
/**
* lpfc_unreg_login - Prepare a mailbox command for unregistering remote login
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
* @rpi: remote port identifier
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The unregistration login mailbox command is used to unregister an N_Port
* or F_Port login. This command frees an RPI context in the HBA. It has the
* effect of performing an implicit N_Port/F_Port logout.
*
* This routine prepares the mailbox command for unregistering remote port
* login.
**/
void
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregLogin.rpi = (uint16_t) rpi;
mb->un.varUnregLogin.rsvd1 = 0;
mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
mb->mbxCommand = MBX_UNREG_LOGIN;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
* @sid: Fibre Channel S_ID (N_Port_ID assigned to a virtual N_Port).
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The registration vport identifier mailbox command is used to activate a
* virtual N_Port after it has acquired an N_Port_ID. The HBA validates the
* N_Port_ID against the information in the selected virtual N_Port context
* block and marks it active to allow normal processing of IOCB commands and
* received unsolicited exchanges.
*
* This routine prepares the mailbox command for registering a virtual N_Port.
**/
void
lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
mb->un.varRegVpi.sid = vport->fc_myDID;
mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
mb->mbxCommand = MBX_REG_VPI;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_unreg_vpi - Prepare a mailbox command for unregistering vport id
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The unregistration vport identifier mailbox command is used to inactivate
* a virtual N_Port. The driver must have logged out and unregistered all
* remote N_Ports to abort any activity on the virtual N_Port. The HBA
* unregisters any default RPIs associated with the specified vpi, aborting
* any active exchanges. The HBA will post the mailbox response after making
* the virtual N_Port inactive.
*
* This routine prepares the mailbox command for unregistering a virtual
* N_Port.
**/
void
lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
mb->mbxCommand = MBX_UNREG_VPI;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB)
* @phba: pointer to lpfc hba data structure.
*
* This routine sets up and initializes the IOCB rings in the Port Control
* Block (PCB).
**/
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
PCB_t *pcbp = phba->pcb;
dma_addr_t pdma_addr;
uint32_t offset;
uint32_t iocbCnt = 0;
int i;
pcbp->maxRing = (psli->num_rings - 1);
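/* Carve each ring's command and response IOCB entries out of the slim2p DMA area */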
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE:
SLI2_IOCB_CMD_SIZE;
pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE:
SLI2_IOCB_RSP_SIZE;
/* A ring MUST have both cmd and rsp entries defined to be
valid */
if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
pcbp->rdsc[i].cmdEntries = 0;
pcbp->rdsc[i].rspEntries = 0;
pcbp->rdsc[i].cmdAddrHigh = 0;
pcbp->rdsc[i].rspAddrHigh = 0;
pcbp->rdsc[i].cmdAddrLow = 0;
pcbp->rdsc[i].rspAddrLow = 0;
pring->cmdringaddr = NULL;
pring->rspringaddr = NULL;
continue;
}
/* Command ring setup for ring */
pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
pcbp->rdsc[i].cmdEntries = pring->numCiocb;
offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
(uint8_t *) phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
iocbCnt += pring->numCiocb;
/* Response ring setup for ring */
pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt];
pcbp->rdsc[i].rspEntries = pring->numRiocb;
offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
(uint8_t *)phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
iocbCnt += pring->numRiocb;
}
}
/**
* lpfc_read_rev - Prepare a mailbox command for reading HBA revision
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The read revision mailbox command is used to read the revision levels of
* the HBA components. These components include hardware units, resident
* firmware, and available firmware. HBAs that support SLI-3 mode of
* operation provide different response information depending on the version
* requested by the driver.
*
* This routine prepares the mailbox command for reading HBA revision
* information.
**/
void
lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRdRev.cv = 1;
mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
mb->mbxCommand = MBX_READ_REV;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2
* @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
* @hbq_desc: pointer to the HBQ selection profile descriptor.
*
* The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA
* tests the incoming frames' R_CTL/TYPE fields with words 10:15 and performs
* the Sequence Length Test using the fields in the Selection Profile 2
* extension in words 20:31.
**/
static void
lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
struct lpfc_hbq_init *hbq_desc)
{
hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
}
/**
* lpfc_build_hbq_profile3 - Set up the HBQ Selection Profile 3
* @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
* @hbq_desc: pointer to the HBQ selection profile descriptor.
*
* The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA
* tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs
* the Sequence Length Test and Byte Field Test using the fields in the
* Selection Profile 3 extension in words 20:31.
**/
static void
lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
struct lpfc_hbq_init *hbq_desc)
{
hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
sizeof(hbqmb->profiles.profile3.cmdmatch));
}
/**
* lpfc_build_hbq_profile5 - Set up the HBQ Selection Profile 5
* @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
* @hbq_desc: pointer to the HBQ selection profile descriptor.
*
* The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The
* HBA tests the initial frame of an incoming sequence using the frame's
* R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test
* and Byte Field Test using the fields in the Selection Profile 5 extension
* words 20:31.
**/
static void
lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
struct lpfc_hbq_init *hbq_desc)
{
hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
sizeof(hbqmb->profiles.profile5.cmdmatch));
}
/**
* lpfc_config_hbq - Prepare a mailbox command for configuring an HBQ
* @phba: pointer to lpfc hba data structure.
* @id: HBQ identifier.
* @hbq_desc: pointer to the HBA descriptor data structure.
* @hbq_entry_index: index of the HBQ entry data structures.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The configure HBQ (Host Buffer Queue) mailbox command is used to configure
* an HBQ. The configuration binds events that require buffers to a particular
* ring and HBQ based on a selection profile.
*
* This routine prepares the mailbox command for configuring an HBQ.
**/
void
lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
struct lpfc_hbq_init *hbq_desc,
uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
{
int i;
MAILBOX_t *mb = &pmb->u.mb;
struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
hbqmb->hbqId = id;
hbqmb->entry_count = hbq_desc->entry_count; /* # entries in HBQ */
hbqmb->recvNotify = hbq_desc->rn; /* Receive
* Notification */
hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks
* # in words 0-19 */
hbqmb->profile = hbq_desc->profile; /* Selection profile:
* 0 = all,
* 7 = logentry */
hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring
* e.g. Ring0=b0001,
* ring2=b0100 */
hbqmb->headerLen = hbq_desc->headerLen; /* 0 if not profile 4
* or 5 */
hbqmb->logEntry = hbq_desc->logEntry; /* Set to 1 if this
* HBQ will be used
* for LogEntry
* buffers */
hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
hbq_entry_index * sizeof(struct lpfc_hbq_entry);
hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);
mb->mbxCommand = MBX_CONFIG_HBQ;
mb->mbxOwner = OWN_HOST;
/* Copy info for profiles 2,3,5. Other
* profiles this area is reserved
*/
if (hbq_desc->profile == 2)
lpfc_build_hbq_profile2(hbqmb, hbq_desc);
else if (hbq_desc->profile == 3)
lpfc_build_hbq_profile3(hbqmb, hbq_desc);
else if (hbq_desc->profile == 5)
lpfc_build_hbq_profile5(hbqmb, hbq_desc);
/* Return if no rctl / type masks for this HBQ */
if (!hbq_desc->mask_count)
return;
/* Otherwise we setup specific rctl / type masks for this HBQ */
for (i = 0; i < hbq_desc->mask_count; i++) {
hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask;
hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask;
}
return;
}
/**
* lpfc_config_ring - Prepare a mailbox command for configuring an IOCB ring
* @phba: pointer to lpfc hba data structure.
* @ring: IOCB ring number to be configured.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The configure ring mailbox command is used to configure an IOCB ring. This
* configuration binds from one to six HBA RC_CTL/TYPE mask entries to the
* ring. This is used to map incoming sequences to a particular ring whose
* RC_CTL/TYPE mask entry matches that of the sequence. The driver should not
* attempt to configure a ring whose number is greater than the number
* specified in the Port Control Block (PCB). It is an error to issue the
* configure ring command more than once with the same ring number. The HBA
* returns an error if the driver attempts this.
*
* This routine prepares the mailbox command for configuring IOCB ring.
**/
void
lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
{
int i;
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varCfgRing.ring = ring;
mb->un.varCfgRing.maxOrigXchg = 0;
mb->un.varCfgRing.maxRespXchg = 0;
mb->un.varCfgRing.recvNotify = 1;
psli = &phba->sli;
pring = &psli->ring[ring];
mb->un.varCfgRing.numMask = pring->num_mask;
mb->mbxCommand = MBX_CONFIG_RING;
mb->mbxOwner = OWN_HOST;
/* Is this ring configured for a specific profile */
if (pring->prt[0].profile) {
mb->un.varCfgRing.profile = pring->prt[0].profile;
return;
}
/* Otherwise we setup specific rctl / type masks for this ring */
for (i = 0; i < pring->num_mask; i++) {
mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
if (mb->un.varCfgRing.rrRegs[i].rval != FC_ELS_REQ)
mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
else
mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
}
return;
}
/**
* lpfc_config_port - Prepare a mailbox command for configuring port
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The configure port mailbox command is used to identify the Port Control
* Block (PCB) in the driver memory. After this command is issued, the
* driver must not access the mailbox in the HBA without first resetting
* the HBA. The HBA may copy the PCB information to internal storage for
* subsequent use; the driver cannot change the PCB information unless it
* resets the HBA.
*
* This routine prepares the mailbox command for configuring port.
**/
void
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
MAILBOX_t *mb = &pmb->u.mb;
dma_addr_t pdma_addr;
uint32_t bar_low, bar_high;
size_t offset;
struct lpfc_hgp hgp;
int i;
uint32_t pgp_offset;
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb->mbxCommand = MBX_CONFIG_PORT;
mb->mbxOwner = OWN_HOST;
mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
/* The Host Group Pointer is always in SLIM */
mb->un.varCfgPort.hps = 1;
/* If the HBA supports SLI-3, ask for it */
if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
if (phba->cfg_enable_bg)
mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
mb->un.varCfgPort.cdss = 1; /* Configure Security */
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
if (phba->max_vpi && phba->cfg_enable_npiv &&
phba->vpd.sli3Feat.cmv) {
mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
mb->un.varCfgPort.cmv = 1;
} else
mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
} else
phba->sli_rev = LPFC_SLI_REV2;
mb->un.varCfgPort.sli_mode = phba->sli_rev;
/* Now setup pcb */
phba->pcb->type = TYPE_NATIVE_SLI2;
phba->pcb->feature = FEATURE_INITIAL_SLI2;
/* Setup Mailbox pointers */
phba->pcb->mailBoxSize = sizeof(MAILBOX_t);
offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);
/*
* Setup Host Group ring pointer.
*
* For efficiency reasons, the ring get/put pointers can be
* placed in adapter memory (SLIM) rather than in host memory.
* This allows firmware to avoid PCI reads/writes when updating
* and checking pointers.
*
* The firmware recognizes the use of SLIM memory by comparing
* the address of the get/put pointers structure with that of
* the SLIM BAR (BAR0).
*
* Caution: be sure to use the PCI config space value of BAR0/BAR1
* (the hardware's view of the base address), not the OS's
* value of pci_resource_start() as the OS value may be a cookie
* for ioremap/iomap.
*/
pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
/*
* Set up HGP - Port Memory
*
* The port expects the host get/put pointers to reside in memory
* following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes)
* area of SLIM. In SLI-2 mode, there's an additional 16 reserved
* words (0x40 bytes). This area is not reserved if HBQs are
* configured in SLI-3.
*
* CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
* RR0Get 0xc4 0x84
* CR1Put 0xc8 0x88
* RR1Get 0xcc 0x8c
* CR2Put 0xd0 0x90
* RR2Get 0xd4 0x94
* CR3Put 0xd8 0x98
* RR3Get 0xdc 0x9c
*
* Reserved 0xa0-0xbf
* If HBQs configured:
* HBQ 0 Put ptr 0xc0
* HBQ 1 Put ptr 0xc4
* HBQ 2 Put ptr 0xc8
* ......
* HBQ(M-1)Put Pointer 0xc0+(M-1)*4
*
*/
if (phba->sli_rev == 3) {
phba->host_gp = &mb_slim->us.s3.host[0];
phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
} else {
phba->host_gp = &mb_slim->us.s2.host[0];
phba->hbq_put = NULL;
}
/* mask off BAR0's flag bits 0 - 3 */
phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
(void __iomem *)phba->host_gp -
(void __iomem *)phba->MBslimaddr;
if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
phba->pcb->hgpAddrHigh = bar_high;
else
phba->pcb->hgpAddrHigh = 0;
/* write HGP data to SLIM at the required longword offset */
memset(&hgp, 0, sizeof(struct lpfc_hgp));
for (i=0; i < phba->sli.num_rings; i++) {
lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
sizeof(*phba->host_gp));
}
/* Setup Port Group offset */
if (phba->sli_rev == 3)
pgp_offset = offsetof(struct lpfc_sli2_slim,
mbx.us.s3_pgp.port);
else
pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
pdma_addr = phba->slim2p.phys + pgp_offset;
phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);
/* Use callback routine to set up rings in the pcb */
lpfc_config_pcb_setup(phba);
/* special handling for LC HBAs */
if (lpfc_is_LC_HBA(phba->pcidev->device)) {
uint32_t hbainit[5];
lpfc_hba_init(phba, hbainit);
memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
}
/* Swap PCB if needed */
lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
}
/**
* lpfc_kill_board - Prepare a mailbox command for killing board
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* The kill board mailbox command is used to tell firmware to perform a
* graceful shutdown of a channel on a specified board to prepare for reset.
* When the kill board mailbox command is received, the ER3 bit is set to 1
* in the Host Status register and the ER Attention bit is set to 1 in the
* Host Attention register of the HBA function that received the kill board
* command.
*
* This routine prepares the mailbox command for killing the board in
* preparation for a graceful shutdown.
**/
void
lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb->mbxCommand = MBX_KILL_BOARD;
mb->mbxOwner = OWN_HOST;
return;
}
/**
* lpfc_mbox_put - Put a mailbox cmd into the tail of driver's mailbox queue
* @phba: pointer to lpfc hba data structure.
* @mbq: pointer to the driver internal queue element for mailbox command.
*
* The driver maintains an internal mailbox command queue implemented as a
* linked list. When a mailbox command is issued, it is put into the mailbox
* command queue so that commands are processed in order, as the HBA can
* process only one mailbox command at a time.
**/
void
lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
{
struct lpfc_sli *psli;
psli = &phba->sli;
list_add_tail(&mbq->list, &psli->mboxq);
psli->mboxq_cnt++;
return;
}
/**
* lpfc_mbox_get - Remove a mailbox cmd from the head of driver's mailbox queue
* @phba: pointer to lpfc hba data structure.
*
* The driver maintains an internal mailbox command queue implemented as a
* linked list. When a mailbox command is issued, it is put into the mailbox
* command queue so that commands are processed in order, as the HBA can
* process only one mailbox command at a time. After the HBA finishes
* processing a mailbox command, the driver removes the next pending mailbox
* command from the head of the mailbox command queue and sends it to the
* HBA for processing.
*
* Return codes
* pointer to the driver internal queue element for mailbox command, or
* NULL if the mailbox command queue is empty.
**/
LPFC_MBOXQ_t *
lpfc_mbox_get(struct lpfc_hba * phba)
{
LPFC_MBOXQ_t *mbq = NULL;
struct lpfc_sli *psli = &phba->sli;
list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
if (mbq)
psli->mboxq_cnt--;
return mbq;
}
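/*
 * Illustrative sketch (not part of the original driver): the internal mailbox
 * queue above is a simple FIFO, so a producer enqueues with lpfc_mbox_put()
 * and the issuing path later dequeues with lpfc_mbox_get(). Taking
 * phba->hbalock around both calls is an assumption made for this example;
 * the real call sites provide their own serialization.
 */
#if 0	/* example only */
static void example_mbox_fifo(struct lpfc_hba *phba, LPFC_MBOXQ_t *new_cmd)
{
	LPFC_MBOXQ_t *next;
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	lpfc_mbox_put(phba, new_cmd);	/* add to the tail of the queue */
	next = lpfc_mbox_get(phba);	/* remove from the head of the queue */
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	/* 'next' is the oldest pending mailbox command, or NULL if empty */
}
#endif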
/**
* __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
* @phba: pointer to lpfc hba data structure.
* @mbq: pointer to the driver internal queue element for mailbox command.
*
* This routine puts the completed mailbox command into the mailbox command
* complete list. This is the unlocked version of the routine. The mailbox
* complete list is used by the driver worker thread to process mailbox
* complete callback functions outside the driver interrupt handler.
**/
void
__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
}
/**
* lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
* @phba: pointer to lpfc hba data structure.
* @mbq: pointer to the driver internal queue element for mailbox command.
*
* This routine puts the completed mailbox command into the mailbox command
* complete list. This is the locked version of the routine. The mailbox
* complete list is used by the driver worker thread to process mailbox
* complete callback functions outside the driver interrupt handler.
**/
void
lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
unsigned long iflag;
/* This function expects to be called from interrupt context */
spin_lock_irqsave(&phba->hbalock, iflag);
__lpfc_mbox_cmpl_put(phba, mbq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
/**
* lpfc_mbox_cmd_check - Check the validity of a mailbox command
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to the driver internal queue element for mailbox command.
*
* This routine checks whether a mailbox command is valid to be issued. The
* check is performed by the mailbox issue API when a client issues a
* mailbox command to the mailbox transport.
*
* Return 0 - pass the check, -ENODEV - fail the check
**/
int
lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
/* Mailbox commands that have a completion handler must also have a
* vport specified.
*/
if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
if (!mboxq->vport) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
"1814 Mbox x%x failed, no vport\n",
mboxq->u.mb.mbxCommand);
dump_stack();
return -ENODEV;
}
}
return 0;
}
/**
* lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
* @phba: pointer to lpfc hba data structure.
*
* This routine checks whether the HBA device is ready for posting a
* mailbox command. It is used by the mailbox transport API at the time it
* is about to post a mailbox command to the device.
*
* Return 0 - pass the check, -ENODEV - fail the check
**/
int
lpfc_mbox_dev_check(struct lpfc_hba *phba)
{
/* If the PCI channel is in offline state, do not issue mbox */
if (unlikely(pci_channel_offline(phba->pcidev)))
return -ENODEV;
/* If the HBA is in error state, do not issue mbox */
if (phba->link_state == LPFC_HBA_ERROR)
return -ENODEV;
return 0;
}
/**
* lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
* @phba: pointer to lpfc hba data structure.
* @cmd: mailbox command code.
*
* This routine retrieves the proper timeout value according to the mailbox
* command code.
*
* Return codes
* Timeout value to be used for the given mailbox command
**/
int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
{
switch (cmd) {
case MBX_WRITE_NV: /* 0x03 */
case MBX_UPDATE_CFG: /* 0x1B */
case MBX_DOWN_LOAD: /* 0x1C */
case MBX_DEL_LD_ENTRY: /* 0x1D */
case MBX_LOAD_AREA: /* 0x81 */
case MBX_WRITE_WWN: /* 0x98 */
case MBX_LOAD_EXP_ROM: /* 0x9C */
return LPFC_MBOX_TMO_FLASH_CMD;
case MBX_SLI4_CONFIG: /* 0x9b */
return LPFC_MBOX_SLI4_CONFIG_TMO;
}
return LPFC_MBOX_TMO;
}
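/*
 * Illustrative sketch (not part of the original driver): the value returned
 * by lpfc_mbox_tmo_val() is a timeout in seconds, so a caller waiting for a
 * mailbox completion would typically scale it to jiffies before sleeping.
 * msecs_to_jiffies() is standard kernel API; the wait itself is elided.
 */
#if 0	/* example only */
static unsigned long example_mbox_timeout_jiffies(struct lpfc_hba *phba,
						  LPFC_MBOXQ_t *pmb)
{
	int secs = lpfc_mbox_tmo_val(phba, pmb->u.mb.mbxCommand);

	return msecs_to_jiffies(secs * 1000);
}
#endif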
/**
* lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
* @mbox: pointer to lpfc mbox command.
* @sgentry: sge entry index.
* @phyaddr: physical address for the sge
* @length: Length of the sge.
*
* This routine sets up an entry in the non-embedded mailbox command at the sge
* index location.
**/
void
lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
dma_addr_t phyaddr, uint32_t length)
{
struct lpfc_mbx_nembed_cmd *nembed_sge;
nembed_sge = (struct lpfc_mbx_nembed_cmd *)
&mbox->u.mqe.un.nembed_cmd;
nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
nembed_sge->sge[sgentry].length = length;
}
/**
* lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
* @mbox: pointer to lpfc mbox command.
* @sgentry: sge entry index.
* @sge: pointer to the sge entry structure to be filled in.
*
* This routine gets an entry from the non-embedded mailbox command at the sge
* index location.
**/
void
lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
struct lpfc_mbx_sge *sge)
{
struct lpfc_mbx_nembed_cmd *nembed_sge;
nembed_sge = (struct lpfc_mbx_nembed_cmd *)
&mbox->u.mqe.un.nembed_cmd;
sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
sge->length = nembed_sge->sge[sgentry].length;
}
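/*
 * Illustrative sketch (not part of the original driver): setting and reading
 * back a single SGE entry of a non-embedded SLI4_CONFIG command with the two
 * helpers above. The physical address and length are supplied by the caller.
 */
#if 0	/* example only */
static void example_sge_roundtrip(struct lpfcMboxq *mbox, dma_addr_t phys,
				  uint32_t len)
{
	struct lpfc_mbx_sge sge;

	lpfc_sli4_mbx_sge_set(mbox, 0, phys, len);
	lpfc_sli4_mbx_sge_get(mbox, 0, &sge);
	/* sge.pa_lo/sge.pa_hi now hold the split address and sge.length == len */
}
#endif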
/**
* lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
*
* This routine frees an SLI4-specific mailbox command used for sending an
* IOCTL command.
**/
void
lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
struct lpfc_mbx_sli4_config *sli4_cfg;
struct lpfc_mbx_sge sge;
dma_addr_t phyaddr;
uint32_t sgecount, sgentry;
sli4_cfg = &mbox->u.mqe.un.sli4_config;
/* For embedded mbox command, just free the mbox command */
if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
mempool_free(mbox, phba->mbox_mem_pool);
return;
}
/* For non-embedded mbox command, we need to free the pages first */
sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
/* There is nothing we can do if there is no sge address array */
if (unlikely(!mbox->sge_array)) {
mempool_free(mbox, phba->mbox_mem_pool);
return;
}
/* Each non-embedded DMA buffer was allocated a full page in length */
for (sgentry = 0; sgentry < sgecount; sgentry++) {
lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
mbox->sge_array->addr[sgentry], phyaddr);
}
/* Free the sge address array memory */
kfree(mbox->sge_array);
/* Finally, free the mailbox command itself */
mempool_free(mbox, phba->mbox_mem_pool);
}
/**
* lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
* @subsystem: The sli4 config sub mailbox subsystem.
* @opcode: The sli4 config sub mailbox command opcode.
* @length: Length of the sli4 config mailbox command.
* @emb: true to build the command in embedded format.
*
* This routine sets up the header fields of an SLI4-specific mailbox command
* for sending an IOCTL command.
*
* Return: the actual length of the mbox command allocated (mostly useful
* for non-embedded mailbox commands).
**/
int
lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
{
struct lpfc_mbx_sli4_config *sli4_config;
union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
uint32_t alloc_len;
uint32_t resid_len;
uint32_t pagen, pcount;
void *viraddr;
dma_addr_t phyaddr;
/* Set up SLI4 mailbox command header fields */
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
/* Set up SLI4 ioctl command header fields */
sli4_config = &mbox->u.mqe.un.sli4_config;
/* Setup for the embedded mbox command */
if (emb) {
/* Set up main header fields */
bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
sli4_config->header.cfg_mhdr.payload_length =
LPFC_MBX_CMD_HDR_LENGTH + length;
/* Set up sub-header fields following main header */
bf_set(lpfc_mbox_hdr_opcode,
&sli4_config->header.cfg_shdr.request, opcode);
bf_set(lpfc_mbox_hdr_subsystem,
&sli4_config->header.cfg_shdr.request, subsystem);
sli4_config->header.cfg_shdr.request.request_length = length;
return length;
}
/* Setup for the non-embedded mbox command */
pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
/* Allocate record for keeping SGE virtual addresses */
mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
GFP_KERNEL);
if (!mbox->sge_array)
return 0;
for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
/* The DMA memory is always allocated as a full page even
* though the last SGE might not fill up to a page; this
* gives a known a priori size of PAGE_SIZE for the later
* DMA memory free.
*/
viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
&phyaddr, GFP_KERNEL);
/* If the allocation fails, proceed with whatever we have */
if (!viraddr)
break;
memset(viraddr, 0, PAGE_SIZE);
mbox->sge_array->addr[pagen] = viraddr;
/* Keep the first page for later sub-header construction */
if (pagen == 0)
cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
resid_len = length - alloc_len;
if (resid_len > PAGE_SIZE) {
lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
PAGE_SIZE);
alloc_len += PAGE_SIZE;
} else {
lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
resid_len);
alloc_len = length;
}
}
/* Set up main header fields in mailbox command */
sli4_config->header.cfg_mhdr.payload_length = alloc_len;
bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
/* Set up sub-header fields into the first page */
if (pagen > 0) {
bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
cfg_shdr->request.request_length =
alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
}
/* The sub-header is in DMA memory, which needs endian conversion */
lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
sizeof(union lpfc_sli4_cfg_shdr));
return alloc_len;
}
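/*
 * Illustrative sketch (not part of the original driver): building and issuing
 * an embedded SLI4_CONFIG mailbox with lpfc_sli4_config() above. The
 * subsystem/opcode values are placeholders supplied by the hypothetical
 * caller; lpfc_sli_issue_mbox() with MBX_POLL is assumed to be the issuing
 * path, and lpfc_sli4_mbox_cmd_free() releases the command afterwards.
 */
#if 0	/* example only */
static int example_sli4_config_embedded(struct lpfc_hba *phba,
					uint8_t subsystem, uint8_t opcode,
					uint32_t payload_len)
{
	struct lpfcMboxq *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/* For an embedded command the return value equals payload_len */
	lpfc_sli4_config(phba, mbox, subsystem, opcode, payload_len, true);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}
#endif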
/**
* lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
*
* This routine gets the opcode from an SLI4-specific mailbox command for
* sending an IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
* (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
* returned.
**/
uint8_t
lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
struct lpfc_mbx_sli4_config *sli4_cfg;
union lpfc_sli4_cfg_shdr *cfg_shdr;
if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
return 0;
sli4_cfg = &mbox->u.mqe.un.sli4_config;
/* For embedded mbox command, get opcode from embedded sub-header*/
if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
}
/* For non-embedded mbox command, get opcode from first dma page */
if (unlikely(!mbox->sge_array))
return 0;
cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
}
/**
* lpfc_request_features - Configure SLI4 REQUEST_FEATURES mailbox
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to lpfc mbox command.
*
* This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
* mailbox command.
**/
void
lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
{
/* Set up SLI4 mailbox command header fields */
memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
/* Set up host requested features. */
bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
if (phba->cfg_enable_fip)
bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
else
bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1);
/* Enable DIF (block guard) only if configured to do so. */
if (phba->cfg_enable_bg)
bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
/* Enable NPIV only if configured to do so. */
if (phba->max_vpi && phba->cfg_enable_npiv)
bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
return;
}
/**
* lpfc_init_vfi - Initialize the INIT_VFI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.
* @vport: Vport associated with the VF.
*
* This routine initializes @mbox to all zeros and then fills in the mailbox
* fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
* in the context of an FCF. The driver issues this command to set up a VFI
* before issuing a FLOGI to log in to the VSAN. The driver should also issue a
* REG_VFI after a successful VSAN login.
**/
void
lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
{
struct lpfc_mbx_init_vfi *init_vfi;
memset(mbox, 0, sizeof(*mbox));
init_vfi = &mbox->u.mqe.un.init_vfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
bf_set(lpfc_init_vfi_vr, init_vfi, 1);
bf_set(lpfc_init_vfi_vt, init_vfi, 1);
bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
}
/**
* lpfc_reg_vfi - Initialize the REG_VFI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.
* @vport: vport associated with the VF.
* @phys: BDE DMA bus address used to send the service parameters to the HBA.
*
* This routine initializes @mbox to all zeros and then fills in the mailbox
* fields from @vport, and uses @buf as a DMAable buffer to send the vport's
* fc service parameters to the HBA for this VFI. REG_VFI configures virtual
* fabrics identified by VFI in the context of an FCF.
**/
void
lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
{
struct lpfc_mbx_reg_vfi *reg_vfi;
memset(mbox, 0, sizeof(*mbox));
reg_vfi = &mbox->u.mqe.un.reg_vfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
reg_vfi->bde.addrHigh = putPaddrHigh(phys);
reg_vfi->bde.addrLow = putPaddrLow(phys);
reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
}
/**
* lpfc_init_vpi - Initialize the INIT_VPI mailbox command
* @phba: pointer to the hba structure to init the VPI for.
* @mbox: pointer to lpfc mbox command to initialize.
* @vpi: VPI to be initialized.
*
* The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
* command to activate a virtual N_Port. The HBA assigns a MAC address to use
* with the virtual N_Port. The SLI Host issues this command before issuing a
* FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
* successful virtual NPort login.
**/
void
lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
{
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
vpi + phba->vpi_base);
bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
phba->pport->vfi + phba->vfi_base);
}
/**
* lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.
* @vfi: VFI to be unregistered.
*
* The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
* (logical NPort) into the inactive state. The SLI Host must have logged out
* and unregistered all remote N_Ports to abort any activity on the virtual
* fabric. The SLI Port posts the mailbox response after marking the virtual
* fabric inactive.
**/
void
lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
{
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
}
/**
* lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters.
* @phba: pointer to the hba structure.
* @mbox: pointer to lpfc mbox command to initialize.
*
* This function creates a SLI4 dump mailbox command to dump FCoE
* parameters stored in region 23.
**/
int
lpfc_dump_fcoe_param(struct lpfc_hba *phba,
struct lpfcMboxq *mbox)
{
struct lpfc_dmabuf *mp = NULL;
MAILBOX_t *mb;
memset(mbox, 0, sizeof(*mbox));
mb = &mbox->u.mb;
mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (mp)
mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
if (!mp || !mp->virt) {
kfree(mp);
/* dump_fcoe_param failed to allocate memory */
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"2569 lpfc_dump_fcoe_param: memory"
" allocation failed\n");
return 1;
}
memset(mp->virt, 0, LPFC_BPL_SIZE);
INIT_LIST_HEAD(&mp->list);
/* save address for completion */
mbox->context1 = (uint8_t *) mp;
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.region_id = DMP_REGION_23;
mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
return 0;
}
/**
* lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
* @phba: pointer to the hba structure containing the FCF index and RQ ID.
* @mbox: pointer to lpfc mbox command to initialize.
*
* The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
* SLI Host uses the command to activate an FCF after it has acquired FCF
* information via a READ_FCF mailbox command. This mailbox command is also used
* to indicate where received unsolicited frames from this FCF will be sent. By
* default this routine will set up the FCF to forward all unsolicited frames
* to the RQ ID passed in the @phba. This can be overridden by the caller for
* more complicated setups.
**/
void
lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
struct lpfc_mbx_reg_fcfi *reg_fcfi;
memset(mbox, 0, sizeof(*mbox));
reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
/* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
(~phba->fcf.addr_mode) & 0x3);
if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
}
}
/**
* lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.
* @fcfi: FCFI to be unregistered.
*
* The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
* The SLI Host uses the command to inactivate an FCFI.
**/
void
lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
{
memset(mbox, 0, sizeof(*mbox));
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
}
/**
* lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.
* @ndlp: The nodelist structure that describes the RPI to resume.
*
* The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
* link event.
**/
void
lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
{
struct lpfc_mbx_resume_rpi *resume_rpi;
memset(mbox, 0, sizeof(*mbox));
resume_rpi = &mbox->u.mqe.un.resume_rpi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}
| gpl-2.0 |
dirtyspark23/android_kernel | drivers/scsi/stex.c | 465 | 41080 | /*
* SuperTrak EX Series Storage Controller driver for Linux
*
* Copyright (C) 2005-2009 Promise Technology Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Written By:
* Ed Lin <promise_linux@promise.com>
*
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#define DRV_NAME "stex"
#define ST_DRIVER_VERSION "4.6.0000.3"
#define ST_VER_MAJOR 4
#define ST_VER_MINOR 6
#define ST_OEM 0
#define ST_BUILD_VER 3
enum {
/* MU register offset */
IMR0 = 0x10, /* MU_INBOUND_MESSAGE_REG0 */
IMR1 = 0x14, /* MU_INBOUND_MESSAGE_REG1 */
OMR0 = 0x18, /* MU_OUTBOUND_MESSAGE_REG0 */
OMR1 = 0x1c, /* MU_OUTBOUND_MESSAGE_REG1 */
IDBL = 0x20, /* MU_INBOUND_DOORBELL */
IIS = 0x24, /* MU_INBOUND_INTERRUPT_STATUS */
IIM = 0x28, /* MU_INBOUND_INTERRUPT_MASK */
ODBL = 0x2c, /* MU_OUTBOUND_DOORBELL */
OIS = 0x30, /* MU_OUTBOUND_INTERRUPT_STATUS */
OIM = 0x3c, /* MU_OUTBOUND_INTERRUPT_MASK */
YIOA_STATUS = 0x00,
YH2I_INT = 0x20,
YINT_EN = 0x34,
YI2H_INT = 0x9c,
YI2H_INT_C = 0xa0,
YH2I_REQ = 0xc0,
YH2I_REQ_HI = 0xc4,
/* MU register value */
MU_INBOUND_DOORBELL_HANDSHAKE = 1,
MU_INBOUND_DOORBELL_REQHEADCHANGED = 2,
MU_INBOUND_DOORBELL_STATUSTAILCHANGED = 4,
MU_INBOUND_DOORBELL_HMUSTOPPED = 8,
MU_INBOUND_DOORBELL_RESET = 16,
MU_OUTBOUND_DOORBELL_HANDSHAKE = 1,
MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = 2,
MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = 4,
MU_OUTBOUND_DOORBELL_BUSCHANGE = 8,
MU_OUTBOUND_DOORBELL_HASEVENT = 16,
/* MU status code */
MU_STATE_STARTING = 1,
MU_STATE_FMU_READY_FOR_HANDSHAKE = 2,
MU_STATE_SEND_HANDSHAKE_FRAME = 3,
MU_STATE_STARTED = 4,
MU_STATE_RESETTING = 5,
MU_MAX_DELAY = 120,
MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000,
MU_HARD_RESET_WAIT = 30000,
HMU_PARTNER_TYPE = 2,
/* firmware returned values */
SRB_STATUS_SUCCESS = 0x01,
SRB_STATUS_ERROR = 0x04,
SRB_STATUS_BUSY = 0x05,
SRB_STATUS_INVALID_REQUEST = 0x06,
SRB_STATUS_SELECTION_TIMEOUT = 0x0A,
SRB_SEE_SENSE = 0x80,
/* task attribute */
TASK_ATTRIBUTE_SIMPLE = 0x0,
TASK_ATTRIBUTE_HEADOFQUEUE = 0x1,
TASK_ATTRIBUTE_ORDERED = 0x2,
TASK_ATTRIBUTE_ACA = 0x4,
SS_STS_NORMAL = 0x80000000,
SS_STS_DONE = 0x40000000,
SS_STS_HANDSHAKE = 0x20000000,
SS_HEAD_HANDSHAKE = 0x80,
SS_H2I_INT_RESET = 0x100,
SS_MU_OPERATIONAL = 0x80000000,
STEX_CDB_LENGTH = 16,
STATUS_VAR_LEN = 128,
/* sg flags */
SG_CF_EOT = 0x80, /* end of table */
SG_CF_64B = 0x40, /* 64 bit item */
SG_CF_HOST = 0x20, /* sg in host memory */
MSG_DATA_DIR_ND = 0,
MSG_DATA_DIR_IN = 1,
MSG_DATA_DIR_OUT = 2,
st_shasta = 0,
st_vsc = 1,
st_yosemite = 2,
st_seq = 3,
st_yel = 4,
PASSTHRU_REQ_TYPE = 0x00000001,
PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
ST_INTERNAL_TIMEOUT = 180,
ST_TO_CMD = 0,
ST_FROM_CMD = 1,
/* vendor specific commands of Promise */
MGT_CMD = 0xd8,
SINBAND_MGT_CMD = 0xd9,
ARRAY_CMD = 0xe0,
CONTROLLER_CMD = 0xe1,
DEBUGGING_CMD = 0xe2,
PASSTHRU_CMD = 0xe3,
PASSTHRU_GET_ADAPTER = 0x05,
PASSTHRU_GET_DRVVER = 0x10,
CTLR_CONFIG_CMD = 0x03,
CTLR_SHUTDOWN = 0x0d,
CTLR_POWER_STATE_CHANGE = 0x0e,
CTLR_POWER_SAVING = 0x01,
PASSTHRU_SIGNATURE = 0x4e415041,
MGT_CMD_SIGNATURE = 0xba,
INQUIRY_EVPD = 0x01,
ST_ADDITIONAL_MEM = 0x200000,
};
struct st_sgitem {
u8 ctrl; /* SG_CF_xxx */
u8 reserved[3];
__le32 count;
__le64 addr;
};
struct st_ss_sgitem {
__le32 addr;
__le32 addr_hi;
__le32 count;
};
struct st_sgtable {
__le16 sg_count;
__le16 max_sg_count;
__le32 sz_in_byte;
};
struct st_msg_header {
__le64 handle;
u8 flag;
u8 channel;
__le16 timeout;
u32 reserved;
};
struct handshake_frame {
__le64 rb_phy; /* request payload queue physical address */
__le16 req_sz; /* size of each request payload */
__le16 req_cnt; /* count of reqs the buffer can hold */
__le16 status_sz; /* size of each status payload */
__le16 status_cnt; /* count of status the buffer can hold */
__le64 hosttime; /* seconds from Jan 1, 1970 (GMT) */
u8 partner_type; /* who sends this frame */
u8 reserved0[7];
__le32 partner_ver_major;
__le32 partner_ver_minor;
__le32 partner_ver_oem;
__le32 partner_ver_build;
__le32 extra_offset; /* NEW */
__le32 extra_size; /* NEW */
__le32 scratch_size;
u32 reserved1;
};
struct req_msg {
__le16 tag;
u8 lun;
u8 target;
u8 task_attr;
u8 task_manage;
u8 data_dir;
u8 payload_sz; /* payload size in 4-byte units, not used */
u8 cdb[STEX_CDB_LENGTH];
u32 variable[0];
};
struct status_msg {
__le16 tag;
u8 lun;
u8 target;
u8 srb_status;
u8 scsi_status;
u8 reserved;
u8 payload_sz; /* payload size in 4-byte units */
u8 variable[STATUS_VAR_LEN];
};
struct ver_info {
u32 major;
u32 minor;
u32 oem;
u32 build;
u32 reserved[2];
};
struct st_frame {
u32 base[6];
u32 rom_addr;
struct ver_info drv_ver;
struct ver_info bios_ver;
u32 bus;
u32 slot;
u32 irq_level;
u32 irq_vec;
u32 id;
u32 subid;
u32 dimm_size;
u8 dimm_type;
u8 reserved[3];
u32 channel;
u32 reserved1;
};
struct st_drvver {
u32 major;
u32 minor;
u32 oem;
u32 build;
u32 signature[2];
u8 console_id;
u8 host_no;
u8 reserved0[2];
u32 reserved[3];
};
struct st_ccb {
struct req_msg *req;
struct scsi_cmnd *cmd;
void *sense_buffer;
unsigned int sense_bufflen;
int sg_count;
u32 req_type;
u8 srb_status;
u8 scsi_status;
u8 reserved[2];
};
struct st_hba {
void __iomem *mmio_base; /* iomapped PCI memory space */
void *dma_mem;
dma_addr_t dma_handle;
size_t dma_size;
struct Scsi_Host *host;
struct pci_dev *pdev;
struct req_msg * (*alloc_rq) (struct st_hba *);
int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
void (*send) (struct st_hba *, struct req_msg *, u16);
u32 req_head;
u32 req_tail;
u32 status_head;
u32 status_tail;
struct status_msg *status_buffer;
void *copy_buffer; /* temp buffer for driver-handled commands */
struct st_ccb *ccb;
struct st_ccb *wait_ccb;
__le32 *scratch;
unsigned int mu_status;
unsigned int cardtype;
int msi_enabled;
int out_req_cnt;
u32 extra_offset;
u16 rq_count;
u16 rq_size;
u16 sts_count;
};
struct st_card_info {
struct req_msg * (*alloc_rq) (struct st_hba *);
int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
void (*send) (struct st_hba *, struct req_msg *, u16);
unsigned int max_id;
unsigned int max_lun;
unsigned int max_channel;
u16 rq_count;
u16 rq_size;
u16 sts_count;
};
static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts(0=off, 1=on)");
static const char console_inq_page[] =
{
0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20, /* "Promise " */
0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E, /* "RAID Con" */
0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20, /* "sole " */
0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20, /* "1.00 " */
0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D, /* "SX/RSAF-" */
0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20, /* "TE1.00 " */
0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
};
MODULE_AUTHOR("Ed Lin");
MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
MODULE_LICENSE("GPL");
MODULE_VERSION(ST_DRIVER_VERSION);
static void stex_gettime(__le64 *time)
{
struct timeval tv;
do_gettimeofday(&tv);
*time = cpu_to_le64(tv.tv_sec);
}
static struct status_msg *stex_get_status(struct st_hba *hba)
{
struct status_msg *status = hba->status_buffer + hba->status_tail;
++hba->status_tail;
hba->status_tail %= hba->sts_count+1;
return status;
}
static void stex_invalid_field(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
/* "Invalid field in cdb" */
scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
0x0);
done(cmd);
}
static struct req_msg *stex_alloc_req(struct st_hba *hba)
{
struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;
++hba->req_head;
hba->req_head %= hba->rq_count+1;
return req;
}
static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
{
return (struct req_msg *)(hba->dma_mem +
hba->req_head * hba->rq_size + sizeof(struct st_msg_header));
}
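/*
 * Illustrative note (not part of the original driver): the request and status
 * rings above are circular buffers with rq_count+1 and sts_count+1 slots, so
 * the head/tail indices advance modulo that size. A hypothetical helper
 * showing the same arithmetic:
 */
#if 0	/* example only */
static u32 example_ring_advance(u32 index, u16 count)
{
	return (index + 1) % (count + 1);	/* wrap at count+1 slots */
}
#endif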
static int stex_map_sg(struct st_hba *hba,
struct req_msg *req, struct st_ccb *ccb)
{
struct scsi_cmnd *cmd;
struct scatterlist *sg;
struct st_sgtable *dst;
struct st_sgitem *table;
int i, nseg;
cmd = ccb->cmd;
nseg = scsi_dma_map(cmd);
BUG_ON(nseg < 0);
if (nseg) {
dst = (struct st_sgtable *)req->variable;
ccb->sg_count = nseg;
dst->sg_count = cpu_to_le16((u16)nseg);
dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
table = (struct st_sgitem *)(dst + 1);
scsi_for_each_sg(cmd, sg, nseg, i) {
table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
table[i].addr = cpu_to_le64(sg_dma_address(sg));
table[i].ctrl = SG_CF_64B | SG_CF_HOST;
}
table[--i].ctrl |= SG_CF_EOT;
}
return nseg;
}
static int stex_ss_map_sg(struct st_hba *hba,
struct req_msg *req, struct st_ccb *ccb)
{
struct scsi_cmnd *cmd;
struct scatterlist *sg;
struct st_sgtable *dst;
struct st_ss_sgitem *table;
int i, nseg;
cmd = ccb->cmd;
nseg = scsi_dma_map(cmd);
BUG_ON(nseg < 0);
if (nseg) {
dst = (struct st_sgtable *)req->variable;
ccb->sg_count = nseg;
dst->sg_count = cpu_to_le16((u16)nseg);
dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
table = (struct st_ss_sgitem *)(dst + 1);
scsi_for_each_sg(cmd, sg, nseg, i) {
table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
table[i].addr =
cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
table[i].addr_hi =
cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
}
}
return nseg;
}
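/*
 * Illustrative note (not part of the original driver): the double 16-bit
 * shift used above splits a dma_addr_t into low/high 32-bit halves without
 * producing a ">= width of type" shift when dma_addr_t is only 32 bits wide.
 * Equivalent arithmetic as a hypothetical helper:
 */
#if 0	/* example only */
static void example_split_dma_addr(dma_addr_t addr, u32 *lo, u32 *hi)
{
	*lo = (u32)(addr & 0xffffffff);
	*hi = (u32)((addr >> 16) >> 16);	/* high half; 0 on 32-bit dma_addr_t */
}
#endif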
static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
{
struct st_frame *p;
size_t count = sizeof(struct st_frame);
p = hba->copy_buffer;
scsi_sg_copy_to_buffer(ccb->cmd, p, count);
memset(p->base, 0, sizeof(u32)*6);
*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
p->rom_addr = 0;
p->drv_ver.major = ST_VER_MAJOR;
p->drv_ver.minor = ST_VER_MINOR;
p->drv_ver.oem = ST_OEM;
p->drv_ver.build = ST_BUILD_VER;
p->bus = hba->pdev->bus->number;
p->slot = hba->pdev->devfn;
p->irq_level = 0;
p->irq_vec = hba->pdev->irq;
p->id = hba->pdev->vendor << 16 | hba->pdev->device;
p->subid =
hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
scsi_sg_copy_from_buffer(ccb->cmd, p, count);
}
static void
stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
req->tag = cpu_to_le16(tag);
hba->ccb[tag].req = req;
hba->out_req_cnt++;
writel(hba->req_head, hba->mmio_base + IMR0);
writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
readl(hba->mmio_base + IDBL); /* flush */
}
static void
stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
struct scsi_cmnd *cmd;
struct st_msg_header *msg_h;
dma_addr_t addr;
req->tag = cpu_to_le16(tag);
hba->ccb[tag].req = req;
hba->out_req_cnt++;
cmd = hba->ccb[tag].cmd;
msg_h = (struct st_msg_header *)req - 1;
if (likely(cmd)) {
msg_h->channel = (u8)cmd->device->channel;
msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ);
}
addr = hba->dma_handle + hba->req_head * hba->rq_size;
addr += (hba->ccb[tag].sg_count+4)/11;
msg_h->handle = cpu_to_le64(addr);
++hba->req_head;
hba->req_head %= hba->rq_count+1;
writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
writel(addr, hba->mmio_base + YH2I_REQ);
readl(hba->mmio_base + YH2I_REQ); /* flush */
}
static int
stex_slave_alloc(struct scsi_device *sdev)
{
/* Cheat: usually extracted from Inquiry data */
sdev->tagged_supported = 1;
scsi_activate_tcq(sdev, sdev->host->can_queue);
return 0;
}
static int
stex_slave_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
sdev->tagged_supported = 1;
return 0;
}
static void
stex_slave_destroy(struct scsi_device *sdev)
{
scsi_deactivate_tcq(sdev, 1);
}
static int
stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
{
struct st_hba *hba;
struct Scsi_Host *host;
unsigned int id, lun;
struct req_msg *req;
u16 tag;
host = cmd->device->host;
id = cmd->device->id;
lun = cmd->device->lun;
hba = (struct st_hba *) &host->hostdata[0];
switch (cmd->cmnd[0]) {
case MODE_SENSE_10:
{
static char ms10_caching_page[12] =
{ 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };
unsigned char page;
page = cmd->cmnd[2] & 0x3f;
if (page == 0x8 || page == 0x3f) {
scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
sizeof(ms10_caching_page));
cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
done(cmd);
} else
stex_invalid_field(cmd, done);
return 0;
}
case REPORT_LUNS:
/*
* The shasta firmware does not report actual luns in the
* target, so fail the command to force sequential lun scan.
* Also, the console device does not support this command.
*/
if (hba->cardtype == st_shasta || id == host->max_id - 1) {
stex_invalid_field(cmd, done);
return 0;
}
break;
case TEST_UNIT_READY:
if (id == host->max_id - 1) {
cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
done(cmd);
return 0;
}
break;
case INQUIRY:
if (id != host->max_id - 1)
break;
if (!lun && !cmd->device->channel &&
(cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
sizeof(console_inq_page));
cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
done(cmd);
} else
stex_invalid_field(cmd, done);
return 0;
case PASSTHRU_CMD:
if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
struct st_drvver ver;
size_t cp_len = sizeof(ver);
ver.major = ST_VER_MAJOR;
ver.minor = ST_VER_MINOR;
ver.oem = ST_OEM;
ver.build = ST_BUILD_VER;
ver.signature[0] = PASSTHRU_SIGNATURE;
ver.console_id = host->max_id - 1;
ver.host_no = hba->host->host_no;
cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
cmd->result = sizeof(ver) == cp_len ?
DID_OK << 16 | COMMAND_COMPLETE << 8 :
DID_ERROR << 16 | COMMAND_COMPLETE << 8;
done(cmd);
return 0;
}
default:
break;
}
cmd->scsi_done = done;
tag = cmd->request->tag;
if (unlikely(tag >= host->can_queue))
return SCSI_MLQUEUE_HOST_BUSY;
req = hba->alloc_rq(hba);
req->lun = lun;
req->target = id;
/* cdb */
memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);
if (cmd->sc_data_direction == DMA_FROM_DEVICE)
req->data_dir = MSG_DATA_DIR_IN;
else if (cmd->sc_data_direction == DMA_TO_DEVICE)
req->data_dir = MSG_DATA_DIR_OUT;
else
req->data_dir = MSG_DATA_DIR_ND;
hba->ccb[tag].cmd = cmd;
hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
hba->ccb[tag].sense_buffer = cmd->sense_buffer;
if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
hba->ccb[tag].sg_count = 0;
memset(&req->variable[0], 0, 8);
}
hba->send(hba, req, tag);
return 0;
}
static void stex_scsi_done(struct st_ccb *ccb)
{
struct scsi_cmnd *cmd = ccb->cmd;
int result;
if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
result = ccb->scsi_status;
switch (ccb->scsi_status) {
case SAM_STAT_GOOD:
result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
break;
case SAM_STAT_CHECK_CONDITION:
result |= DRIVER_SENSE << 24;
break;
case SAM_STAT_BUSY:
result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
break;
default:
result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
break;
}
}
else if (ccb->srb_status & SRB_SEE_SENSE)
result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
else switch (ccb->srb_status) {
case SRB_STATUS_SELECTION_TIMEOUT:
result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
break;
case SRB_STATUS_BUSY:
result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
break;
case SRB_STATUS_INVALID_REQUEST:
case SRB_STATUS_ERROR:
default:
result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
break;
}
cmd->result = result;
cmd->scsi_done(cmd);
}
static void stex_copy_data(struct st_ccb *ccb,
struct status_msg *resp, unsigned int variable)
{
if (resp->scsi_status != SAM_STAT_GOOD) {
if (ccb->sense_buffer != NULL)
memcpy(ccb->sense_buffer, resp->variable,
min(variable, ccb->sense_bufflen));
return;
}
if (ccb->cmd == NULL)
return;
scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable);
}
static void stex_check_cmd(struct st_hba *hba,
struct st_ccb *ccb, struct status_msg *resp)
{
if (ccb->cmd->cmnd[0] == MGT_CMD &&
resp->scsi_status != SAM_STAT_CHECK_CONDITION)
scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
le32_to_cpu(*(__le32 *)&resp->variable[0]));
}
static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
{
void __iomem *base = hba->mmio_base;
struct status_msg *resp;
struct st_ccb *ccb;
unsigned int size;
u16 tag;
if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED)))
return;
/* status payloads */
hba->status_head = readl(base + OMR1);
if (unlikely(hba->status_head > hba->sts_count)) {
printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
pci_name(hba->pdev));
return;
}
/*
* it is not a valid status payload if:
* 1. there are no pending requests (e.g. during the init stage)
* 2. there are some pending requests, but the controller is in
* reset status, and its type is not st_yosemite;
* firmware of st_yosemite in reset status will return pending requests
* to the driver, so we allow it to pass
*/
if (unlikely(hba->out_req_cnt <= 0 ||
(hba->mu_status == MU_STATE_RESETTING &&
hba->cardtype != st_yosemite))) {
hba->status_tail = hba->status_head;
goto update_status;
}
while (hba->status_tail != hba->status_head) {
resp = stex_get_status(hba);
tag = le16_to_cpu(resp->tag);
if (unlikely(tag >= hba->host->can_queue)) {
printk(KERN_WARNING DRV_NAME
"(%s): invalid tag\n", pci_name(hba->pdev));
continue;
}
hba->out_req_cnt--;
ccb = &hba->ccb[tag];
if (unlikely(hba->wait_ccb == ccb))
hba->wait_ccb = NULL;
if (unlikely(ccb->req == NULL)) {
printk(KERN_WARNING DRV_NAME
"(%s): lagging req\n", pci_name(hba->pdev));
continue;
}
size = resp->payload_sz * sizeof(u32); /* payload size */
if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
size > sizeof(*resp))) {
printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
pci_name(hba->pdev));
} else {
size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
if (size)
stex_copy_data(ccb, resp, size);
}
ccb->req = NULL;
ccb->srb_status = resp->srb_status;
ccb->scsi_status = resp->scsi_status;
if (likely(ccb->cmd != NULL)) {
if (hba->cardtype == st_yosemite)
stex_check_cmd(hba, ccb, resp);
if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
stex_controller_info(hba, ccb);
scsi_dma_unmap(ccb->cmd);
stex_scsi_done(ccb);
} else
ccb->req_type = 0;
}
update_status:
writel(hba->status_head, base + IMR1);
readl(base + IMR1); /* flush */
}
static irqreturn_t stex_intr(int irq, void *__hba)
{
struct st_hba *hba = __hba;
void __iomem *base = hba->mmio_base;
u32 data;
unsigned long flags;
int handled = 0;
spin_lock_irqsave(hba->host->host_lock, flags);
data = readl(base + ODBL);
if (data && data != 0xffffffff) {
/* clear the interrupt */
writel(data, base + ODBL);
readl(base + ODBL); /* flush */
stex_mu_intr(hba, data);
handled = 1;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
return IRQ_RETVAL(handled);
}
static void stex_ss_mu_intr(struct st_hba *hba)
{
struct status_msg *resp;
struct st_ccb *ccb;
__le32 *scratch;
unsigned int size;
int count = 0;
u32 value;
u16 tag;
if (unlikely(hba->out_req_cnt <= 0 ||
hba->mu_status == MU_STATE_RESETTING))
return;
while (count < hba->sts_count) {
scratch = hba->scratch + hba->status_tail;
value = le32_to_cpu(*scratch);
if (unlikely(!(value & SS_STS_NORMAL)))
return;
resp = hba->status_buffer + hba->status_tail;
*scratch = 0;
++count;
++hba->status_tail;
hba->status_tail %= hba->sts_count+1;
tag = (u16)value;
if (unlikely(tag >= hba->host->can_queue)) {
printk(KERN_WARNING DRV_NAME
"(%s): invalid tag\n", pci_name(hba->pdev));
continue;
}
hba->out_req_cnt--;
ccb = &hba->ccb[tag];
if (unlikely(hba->wait_ccb == ccb))
hba->wait_ccb = NULL;
if (unlikely(ccb->req == NULL)) {
printk(KERN_WARNING DRV_NAME
"(%s): lagging req\n", pci_name(hba->pdev));
continue;
}
ccb->req = NULL;
if (likely(value & SS_STS_DONE)) { /* normal case */
ccb->srb_status = SRB_STATUS_SUCCESS;
ccb->scsi_status = SAM_STAT_GOOD;
} else {
ccb->srb_status = resp->srb_status;
ccb->scsi_status = resp->scsi_status;
size = resp->payload_sz * sizeof(u32);
if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
size > sizeof(*resp))) {
printk(KERN_WARNING DRV_NAME
"(%s): bad status size\n",
pci_name(hba->pdev));
} else {
size -= sizeof(*resp) - STATUS_VAR_LEN;
if (size)
stex_copy_data(ccb, resp, size);
}
if (likely(ccb->cmd != NULL))
stex_check_cmd(hba, ccb, resp);
}
if (likely(ccb->cmd != NULL)) {
scsi_dma_unmap(ccb->cmd);
stex_scsi_done(ccb);
} else
ccb->req_type = 0;
}
}
static irqreturn_t stex_ss_intr(int irq, void *__hba)
{
struct st_hba *hba = __hba;
void __iomem *base = hba->mmio_base;
u32 data;
unsigned long flags;
int handled = 0;
spin_lock_irqsave(hba->host->host_lock, flags);
data = readl(base + YI2H_INT);
if (data && data != 0xffffffff) {
/* clear the interrupt */
writel(data, base + YI2H_INT_C);
stex_ss_mu_intr(hba);
handled = 1;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
return IRQ_RETVAL(handled);
}
static int stex_common_handshake(struct st_hba *hba)
{
void __iomem *base = hba->mmio_base;
struct handshake_frame *h;
dma_addr_t status_phys;
u32 data;
unsigned long before;
if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
readl(base + IDBL);
before = jiffies;
while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
printk(KERN_ERR DRV_NAME
"(%s): no handshake signature\n",
pci_name(hba->pdev));
return -1;
}
rmb();
msleep(1);
}
}
udelay(10);
data = readl(base + OMR1);
if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
data &= 0x0000ffff;
if (hba->host->can_queue > data) {
hba->host->can_queue = data;
hba->host->cmd_per_lun = data;
}
}
h = (struct handshake_frame *)hba->status_buffer;
h->rb_phy = cpu_to_le64(hba->dma_handle);
h->req_sz = cpu_to_le16(hba->rq_size);
h->req_cnt = cpu_to_le16(hba->rq_count+1);
h->status_sz = cpu_to_le16(sizeof(struct status_msg));
h->status_cnt = cpu_to_le16(hba->sts_count+1);
stex_gettime(&h->hosttime);
h->partner_type = HMU_PARTNER_TYPE;
if (hba->extra_offset) {
h->extra_offset = cpu_to_le32(hba->extra_offset);
h->extra_size = cpu_to_le32(ST_ADDITIONAL_MEM);
} else
h->extra_offset = h->extra_size = 0;
status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
writel(status_phys, base + IMR0);
readl(base + IMR0);
writel((status_phys >> 16) >> 16, base + IMR1);
readl(base + IMR1);
writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
readl(base + OMR0);
writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
readl(base + IDBL); /* flush */
udelay(10);
before = jiffies;
while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
printk(KERN_ERR DRV_NAME
"(%s): no signature after handshake frame\n",
pci_name(hba->pdev));
return -1;
}
rmb();
msleep(1);
}
writel(0, base + IMR0);
readl(base + IMR0);
writel(0, base + OMR0);
readl(base + OMR0);
writel(0, base + IMR1);
readl(base + IMR1);
writel(0, base + OMR1);
readl(base + OMR1); /* flush */
return 0;
}
static int stex_ss_handshake(struct st_hba *hba)
{
void __iomem *base = hba->mmio_base;
struct st_msg_header *msg_h;
struct handshake_frame *h;
__le32 *scratch;
u32 data;
unsigned long before;
int ret = 0;
before = jiffies;
while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) {
if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
printk(KERN_ERR DRV_NAME
"(%s): firmware not operational\n",
pci_name(hba->pdev));
return -1;
}
msleep(1);
}
msg_h = (struct st_msg_header *)hba->dma_mem;
msg_h->handle = cpu_to_le64(hba->dma_handle);
msg_h->flag = SS_HEAD_HANDSHAKE;
h = (struct handshake_frame *)(msg_h + 1);
h->rb_phy = cpu_to_le64(hba->dma_handle);
h->req_sz = cpu_to_le16(hba->rq_size);
h->req_cnt = cpu_to_le16(hba->rq_count+1);
h->status_sz = cpu_to_le16(sizeof(struct status_msg));
h->status_cnt = cpu_to_le16(hba->sts_count+1);
stex_gettime(&h->hosttime);
h->partner_type = HMU_PARTNER_TYPE;
h->extra_offset = h->extra_size = 0;
h->scratch_size = cpu_to_le32((hba->sts_count+1)*sizeof(u32));
data = readl(base + YINT_EN);
data &= ~4;
writel(data, base + YINT_EN);
writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
writel(hba->dma_handle, base + YH2I_REQ);
scratch = hba->scratch;
before = jiffies;
while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
printk(KERN_ERR DRV_NAME
"(%s): no signature after handshake frame\n",
pci_name(hba->pdev));
ret = -1;
break;
}
rmb();
msleep(1);
}
*scratch = 0;
msg_h->flag = 0;
return ret;
}
static int stex_handshake(struct st_hba *hba)
{
int err;
unsigned long flags;
err = (hba->cardtype == st_yel) ?
stex_ss_handshake(hba) : stex_common_handshake(hba);
if (err == 0) {
spin_lock_irqsave(hba->host->host_lock, flags);
hba->req_head = 0;
hba->req_tail = 0;
hba->status_head = 0;
hba->status_tail = 0;
hba->out_req_cnt = 0;
hba->mu_status = MU_STATE_STARTED;
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
return err;
}
static int stex_abort(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct st_hba *hba = (struct st_hba *)host->hostdata;
u16 tag = cmd->request->tag;
void __iomem *base;
u32 data;
int result = SUCCESS;
unsigned long flags;
printk(KERN_INFO DRV_NAME
"(%s): aborting command\n", pci_name(hba->pdev));
scsi_print_command(cmd);
base = hba->mmio_base;
spin_lock_irqsave(host->host_lock, flags);
if (tag < host->can_queue && hba->ccb[tag].cmd == cmd)
hba->wait_ccb = &hba->ccb[tag];
else {
for (tag = 0; tag < host->can_queue; tag++)
if (hba->ccb[tag].cmd == cmd) {
hba->wait_ccb = &hba->ccb[tag];
break;
}
if (tag >= host->can_queue)
goto out;
}
if (hba->cardtype == st_yel) {
data = readl(base + YI2H_INT);
if (data == 0 || data == 0xffffffff)
goto fail_out;
writel(data, base + YI2H_INT_C);
stex_ss_mu_intr(hba);
} else {
data = readl(base + ODBL);
if (data == 0 || data == 0xffffffff)
goto fail_out;
writel(data, base + ODBL);
readl(base + ODBL); /* flush */
stex_mu_intr(hba, data);
}
if (hba->wait_ccb == NULL) {
printk(KERN_WARNING DRV_NAME
"(%s): lost interrupt\n", pci_name(hba->pdev));
goto out;
}
fail_out:
scsi_dma_unmap(cmd);
hba->wait_ccb->req = NULL; /* nullify the req's future return */
hba->wait_ccb = NULL;
result = FAILED;
out:
spin_unlock_irqrestore(host->host_lock, flags);
return result;
}
static void stex_hard_reset(struct st_hba *hba)
{
struct pci_bus *bus;
int i;
u16 pci_cmd;
u8 pci_bctl;
for (i = 0; i < 16; i++)
pci_read_config_dword(hba->pdev, i * 4,
&hba->pdev->saved_config_space[i]);
/* Reset secondary bus. Our controller (MU/ATU) is the only device on the
secondary bus. Consult the Intel 80331/3 developer's manual for details */
bus = hba->pdev->bus;
pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
/*
* 1 ms may be enough for 8-port controllers. But 16-port controllers
* require more time to finish bus reset. Use 100 ms here for safety
*/
msleep(100);
pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
break;
msleep(1);
}
ssleep(5);
for (i = 0; i < 16; i++)
pci_write_config_dword(hba->pdev, i * 4,
hba->pdev->saved_config_space[i]);
}
static void stex_ss_reset(struct st_hba *hba)
{
writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
readl(hba->mmio_base + YH2I_INT);
ssleep(5);
}
static int stex_reset(struct scsi_cmnd *cmd)
{
struct st_hba *hba;
void __iomem *base;
unsigned long flags, before;
hba = (struct st_hba *) &cmd->device->host->hostdata[0];
printk(KERN_INFO DRV_NAME
"(%s): resetting host\n", pci_name(hba->pdev));
scsi_print_command(cmd);
hba->mu_status = MU_STATE_RESETTING;
if (hba->cardtype == st_shasta)
stex_hard_reset(hba);
else if (hba->cardtype == st_yel)
stex_ss_reset(hba);
if (hba->cardtype != st_yosemite) {
if (stex_handshake(hba)) {
printk(KERN_WARNING DRV_NAME
"(%s): resetting: handshake failed\n",
pci_name(hba->pdev));
return FAILED;
}
return SUCCESS;
}
/* st_yosemite */
writel(MU_INBOUND_DOORBELL_RESET, hba->mmio_base + IDBL);
readl(hba->mmio_base + IDBL); /* flush */
before = jiffies;
while (hba->out_req_cnt > 0) {
if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
printk(KERN_WARNING DRV_NAME
"(%s): reset timeout\n", pci_name(hba->pdev));
return FAILED;
}
msleep(1);
}
base = hba->mmio_base;
writel(0, base + IMR0);
readl(base + IMR0);
writel(0, base + OMR0);
readl(base + OMR0);
writel(0, base + IMR1);
readl(base + IMR1);
writel(0, base + OMR1);
readl(base + OMR1); /* flush */
spin_lock_irqsave(hba->host->host_lock, flags);
hba->req_head = 0;
hba->req_tail = 0;
hba->status_head = 0;
hba->status_tail = 0;
hba->out_req_cnt = 0;
hba->mu_status = MU_STATE_STARTED;
spin_unlock_irqrestore(hba->host->host_lock, flags);
return SUCCESS;
}
static int stex_biosparam(struct scsi_device *sdev,
struct block_device *bdev, sector_t capacity, int geom[])
{
int heads = 255, sectors = 63;
if (capacity < 0x200000) {
heads = 64;
sectors = 32;
}
sector_div(capacity, heads * sectors);
geom[0] = heads;
geom[1] = sectors;
geom[2] = capacity;
return 0;
}
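/*
 * Illustrative note (not part of the original driver): with the geometry
 * chosen above, a hypothetical 4 GiB volume (8388608 512-byte sectors)
 * gets 255 heads and 63 sectors per track, so sector_div() leaves
 * 8388608 / (255 * 63) = 522 cylinders in geom[2].
 */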
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = DRV_NAME,
.proc_name = DRV_NAME,
.bios_param = stex_biosparam,
.queuecommand = stex_queuecommand,
.slave_alloc = stex_slave_alloc,
.slave_configure = stex_slave_config,
.slave_destroy = stex_slave_destroy,
.eh_abort_handler = stex_abort,
.eh_host_reset_handler = stex_reset,
.this_id = -1,
};
static struct pci_device_id stex_pci_tbl[] = {
/* st_shasta */
{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX12350 */
{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX4350 */
{ 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX24350 */
/* st_vsc */
{ 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
/* st_yosemite */
{ 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite },
/* st_seq */
{ 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq },
/* st_yel */
{ 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
{ 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },
{ } /* terminate list */
};
static struct st_card_info stex_card_info[] = {
/* st_shasta */
{
.max_id = 17,
.max_lun = 8,
.max_channel = 0,
.rq_count = 32,
.rq_size = 1048,
.sts_count = 32,
.alloc_rq = stex_alloc_req,
.map_sg = stex_map_sg,
.send = stex_send_cmd,
},
/* st_vsc */
{
.max_id = 129,
.max_lun = 1,
.max_channel = 0,
.rq_count = 32,
.rq_size = 1048,
.sts_count = 32,
.alloc_rq = stex_alloc_req,
.map_sg = stex_map_sg,
.send = stex_send_cmd,
},
/* st_yosemite */
{
.max_id = 2,
.max_lun = 256,
.max_channel = 0,
.rq_count = 256,
.rq_size = 1048,
.sts_count = 256,
.alloc_rq = stex_alloc_req,
.map_sg = stex_map_sg,
.send = stex_send_cmd,
},
/* st_seq */
{
.max_id = 129,
.max_lun = 1,
.max_channel = 0,
.rq_count = 32,
.rq_size = 1048,
.sts_count = 32,
.alloc_rq = stex_alloc_req,
.map_sg = stex_map_sg,
.send = stex_send_cmd,
},
/* st_yel */
{
.max_id = 129,
.max_lun = 256,
.max_channel = 3,
.rq_count = 801,
.rq_size = 512,
.sts_count = 801,
.alloc_rq = stex_ss_alloc_req,
.map_sg = stex_ss_map_sg,
.send = stex_ss_send_cmd,
},
};
static int stex_set_dma_mask(struct pci_dev * pdev)
{
int ret;
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
return 0;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!ret)
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
return ret;
}
static int stex_request_irq(struct st_hba *hba)
{
struct pci_dev *pdev = hba->pdev;
int status;
if (msi) {
status = pci_enable_msi(pdev);
if (status != 0)
printk(KERN_ERR DRV_NAME
"(%s): error %d setting up MSI\n",
pci_name(pdev), status);
else
hba->msi_enabled = 1;
} else
hba->msi_enabled = 0;
status = request_irq(pdev->irq, hba->cardtype == st_yel ?
stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);
if (status != 0) {
if (hba->msi_enabled)
pci_disable_msi(pdev);
}
return status;
}
static void stex_free_irq(struct st_hba *hba)
{
struct pci_dev *pdev = hba->pdev;
free_irq(pdev->irq, hba);
if (hba->msi_enabled)
pci_disable_msi(pdev);
}
static int __devinit
stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct st_hba *hba;
struct Scsi_Host *host;
const struct st_card_info *ci = NULL;
u32 sts_offset, cp_offset, scratch_offset;
int err;
err = pci_enable_device(pdev);
if (err)
return err;
pci_set_master(pdev);
host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));
if (!host) {
printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",
pci_name(pdev));
err = -ENOMEM;
goto out_disable;
}
hba = (struct st_hba *)host->hostdata;
memset(hba, 0, sizeof(struct st_hba));
err = pci_request_regions(pdev, DRV_NAME);
if (err < 0) {
printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
pci_name(pdev));
goto out_scsi_host_put;
}
hba->mmio_base = pci_ioremap_bar(pdev, 0);
if (!hba->mmio_base) {
printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
pci_name(pdev));
err = -ENOMEM;
goto out_release_regions;
}
err = stex_set_dma_mask(pdev);
if (err) {
printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
pci_name(pdev));
goto out_iounmap;
}
hba->cardtype = (unsigned int) id->driver_data;
ci = &stex_card_info[hba->cardtype];
sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
if (hba->cardtype == st_yel)
sts_offset += (ci->sts_count+1) * sizeof(u32);
cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
hba->dma_size = cp_offset + sizeof(struct st_frame);
if (hba->cardtype == st_seq ||
(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
hba->extra_offset = hba->dma_size;
hba->dma_size += ST_ADDITIONAL_MEM;
}
hba->dma_mem = dma_alloc_coherent(&pdev->dev,
hba->dma_size, &hba->dma_handle, GFP_KERNEL);
if (!hba->dma_mem) {
err = -ENOMEM;
printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
pci_name(pdev));
goto out_iounmap;
}
hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
if (!hba->ccb) {
err = -ENOMEM;
printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n",
pci_name(pdev));
goto out_pci_free;
}
if (hba->cardtype == st_yel)
hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
hba->copy_buffer = hba->dma_mem + cp_offset;
hba->rq_count = ci->rq_count;
hba->rq_size = ci->rq_size;
hba->sts_count = ci->sts_count;
hba->alloc_rq = ci->alloc_rq;
hba->map_sg = ci->map_sg;
hba->send = ci->send;
hba->mu_status = MU_STATE_STARTING;
if (hba->cardtype == st_yel)
host->sg_tablesize = 38;
else
host->sg_tablesize = 32;
host->can_queue = ci->rq_count;
host->cmd_per_lun = ci->rq_count;
host->max_id = ci->max_id;
host->max_lun = ci->max_lun;
host->max_channel = ci->max_channel;
host->unique_id = host->host_no;
host->max_cmd_len = STEX_CDB_LENGTH;
hba->host = host;
hba->pdev = pdev;
err = stex_request_irq(hba);
if (err) {
printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
pci_name(pdev));
goto out_ccb_free;
}
err = stex_handshake(hba);
if (err)
goto out_free_irq;
err = scsi_init_shared_tag_map(host, host->can_queue);
if (err) {
printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
pci_name(pdev));
goto out_free_irq;
}
pci_set_drvdata(pdev, hba);
err = scsi_add_host(host, &pdev->dev);
if (err) {
printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",
pci_name(pdev));
goto out_free_irq;
}
scsi_scan_host(host);
return 0;
out_free_irq:
stex_free_irq(hba);
out_ccb_free:
kfree(hba->ccb);
out_pci_free:
dma_free_coherent(&pdev->dev, hba->dma_size,
hba->dma_mem, hba->dma_handle);
out_iounmap:
iounmap(hba->mmio_base);
out_release_regions:
pci_release_regions(pdev);
out_scsi_host_put:
scsi_host_put(host);
out_disable:
pci_disable_device(pdev);
return err;
}
static void stex_hba_stop(struct st_hba *hba)
{
struct req_msg *req;
struct st_msg_header *msg_h;
unsigned long flags;
unsigned long before;
u16 tag = 0;
spin_lock_irqsave(hba->host->host_lock, flags);
req = hba->alloc_rq(hba);
if (hba->cardtype == st_yel) {
msg_h = (struct st_msg_header *)req - 1;
memset(msg_h, 0, hba->rq_size);
} else
memset(req, 0, hba->rq_size);
if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) {
req->cdb[0] = MGT_CMD;
req->cdb[1] = MGT_CMD_SIGNATURE;
req->cdb[2] = CTLR_CONFIG_CMD;
req->cdb[3] = CTLR_SHUTDOWN;
} else {
req->cdb[0] = CONTROLLER_CMD;
req->cdb[1] = CTLR_POWER_STATE_CHANGE;
req->cdb[2] = CTLR_POWER_SAVING;
}
hba->ccb[tag].cmd = NULL;
hba->ccb[tag].sg_count = 0;
hba->ccb[tag].sense_bufflen = 0;
hba->ccb[tag].sense_buffer = NULL;
hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;
hba->send(hba, req, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
before = jiffies;
while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
hba->ccb[tag].req_type = 0;
return;
}
msleep(1);
}
}
static void stex_hba_free(struct st_hba *hba)
{
stex_free_irq(hba);
iounmap(hba->mmio_base);
pci_release_regions(hba->pdev);
kfree(hba->ccb);
dma_free_coherent(&hba->pdev->dev, hba->dma_size,
hba->dma_mem, hba->dma_handle);
}
static void stex_remove(struct pci_dev *pdev)
{
struct st_hba *hba = pci_get_drvdata(pdev);
scsi_remove_host(hba->host);
pci_set_drvdata(pdev, NULL);
stex_hba_stop(hba);
stex_hba_free(hba);
scsi_host_put(hba->host);
pci_disable_device(pdev);
}
static void stex_shutdown(struct pci_dev *pdev)
{
struct st_hba *hba = pci_get_drvdata(pdev);
stex_hba_stop(hba);
}
MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
static struct pci_driver stex_pci_driver = {
.name = DRV_NAME,
.id_table = stex_pci_tbl,
.probe = stex_probe,
.remove = __devexit_p(stex_remove),
.shutdown = stex_shutdown,
};
static int __init stex_init(void)
{
printk(KERN_INFO DRV_NAME
": Promise SuperTrak EX Driver version: %s\n",
ST_DRIVER_VERSION);
return pci_register_driver(&stex_pci_driver);
}
static void __exit stex_exit(void)
{
pci_unregister_driver(&stex_pci_driver);
}
module_init(stex_init);
module_exit(stex_exit);
| gpl-2.0 |
riverzhou/kernel-c8500 | security/selinux/avc.c | 465 | 22584 | /*
* Implementation of the kernel access vector cache (AVC).
*
* Authors: Stephen Smalley, <sds@epoch.ncsc.mil>
* James Morris <jmorris@redhat.com>
*
* Update: KaiGai, Kohei <kaigai@ak.jp.nec.com>
* Replaced the avc_lock spinlock by RCU.
*
* Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/percpu.h>
#include <net/sock.h>
#include <linux/un.h>
#include <net/af_unix.h>
#include <linux/ip.h>
#include <linux/audit.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include "avc.h"
#include "avc_ss.h"
static const struct av_perm_to_string av_perm_to_string[] = {
#define S_(c, v, s) { c, v, s },
#include "av_perm_to_string.h"
#undef S_
};
static const char *class_to_string[] = {
#define S_(s) s,
#include "class_to_string.h"
#undef S_
};
#define TB_(s) static const char *s[] = {
#define TE_(s) };
#define S_(s) s,
#include "common_perm_to_string.h"
#undef TB_
#undef TE_
#undef S_
static const struct av_inherit av_inherit[] = {
#define S_(c, i, b) { .tclass = c,\
.common_pts = common_##i##_perm_to_string,\
.common_base = b },
#include "av_inherit.h"
#undef S_
};
const struct selinux_class_perm selinux_class_perm = {
.av_perm_to_string = av_perm_to_string,
.av_pts_len = ARRAY_SIZE(av_perm_to_string),
.class_to_string = class_to_string,
.cts_len = ARRAY_SIZE(class_to_string),
.av_inherit = av_inherit,
.av_inherit_len = ARRAY_SIZE(av_inherit)
};
#define AVC_CACHE_SLOTS 512
#define AVC_DEF_CACHE_THRESHOLD 512
#define AVC_CACHE_RECLAIM 16
#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
#define avc_cache_stats_incr(field) \
do { \
per_cpu(avc_cache_stats, get_cpu()).field++; \
put_cpu(); \
} while (0)
#else
#define avc_cache_stats_incr(field) do {} while (0)
#endif
struct avc_entry {
u32 ssid;
u32 tsid;
u16 tclass;
struct av_decision avd;
};
struct avc_node {
struct avc_entry ae;
struct hlist_node list; /* anchored in avc_cache->slots[i] */
struct rcu_head rhead;
};
struct avc_cache {
struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
atomic_t lru_hint; /* LRU hint for reclaim scan */
atomic_t active_nodes;
u32 latest_notif; /* latest revocation notification */
};
struct avc_callback_node {
int (*callback) (u32 event, u32 ssid, u32 tsid,
u16 tclass, u32 perms,
u32 *out_retained);
u32 events;
u32 ssid;
u32 tsid;
u16 tclass;
u32 perms;
struct avc_callback_node *next;
};
/* Exported via selinuxfs */
unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;
#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
#endif
static struct avc_cache avc_cache;
static struct avc_callback_node *avc_callbacks;
static struct kmem_cache *avc_node_cachep;
static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
{
return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
}
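/*
 * Worked example (added for clarity, not in the original source): for
 * ssid = 1, tsid = 2 and tclass = 3 the hash is
 * (1 ^ (2 << 2) ^ (3 << 4)) & 511 = (1 ^ 8 ^ 48) & 511 = 57,
 * i.e. the entry lands in slot 57 of the 512 cache slots.
 */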
/**
* avc_dump_av - Display an access vector in human-readable form.
* @tclass: target security class
* @av: access vector
*/
static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
{
const char **common_pts = NULL;
u32 common_base = 0;
int i, i2, perm;
if (av == 0) {
audit_log_format(ab, " null");
return;
}
for (i = 0; i < ARRAY_SIZE(av_inherit); i++) {
if (av_inherit[i].tclass == tclass) {
common_pts = av_inherit[i].common_pts;
common_base = av_inherit[i].common_base;
break;
}
}
audit_log_format(ab, " {");
i = 0;
perm = 1;
while (perm < common_base) {
if (perm & av) {
audit_log_format(ab, " %s", common_pts[i]);
av &= ~perm;
}
i++;
perm <<= 1;
}
while (i < sizeof(av) * 8) {
if (perm & av) {
for (i2 = 0; i2 < ARRAY_SIZE(av_perm_to_string); i2++) {
if ((av_perm_to_string[i2].tclass == tclass) &&
(av_perm_to_string[i2].value == perm))
break;
}
if (i2 < ARRAY_SIZE(av_perm_to_string)) {
audit_log_format(ab, " %s",
av_perm_to_string[i2].name);
av &= ~perm;
}
}
i++;
perm <<= 1;
}
if (av)
audit_log_format(ab, " 0x%x", av);
audit_log_format(ab, " }");
}
/**
* avc_dump_query - Display a SID pair and a class in human-readable form.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
*/
static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tclass)
{
int rc;
char *scontext;
u32 scontext_len;
rc = security_sid_to_context(ssid, &scontext, &scontext_len);
if (rc)
audit_log_format(ab, "ssid=%d", ssid);
else {
audit_log_format(ab, "scontext=%s", scontext);
kfree(scontext);
}
rc = security_sid_to_context(tsid, &scontext, &scontext_len);
if (rc)
audit_log_format(ab, " tsid=%d", tsid);
else {
audit_log_format(ab, " tcontext=%s", scontext);
kfree(scontext);
}
BUG_ON(tclass >= ARRAY_SIZE(class_to_string) || !class_to_string[tclass]);
audit_log_format(ab, " tclass=%s", class_to_string[tclass]);
}
/**
* avc_init - Initialize the AVC.
*
* Initialize the access vector cache.
*/
void __init avc_init(void)
{
int i;
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
INIT_HLIST_HEAD(&avc_cache.slots[i]);
spin_lock_init(&avc_cache.slots_lock[i]);
}
atomic_set(&avc_cache.active_nodes, 0);
atomic_set(&avc_cache.lru_hint, 0);
avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
0, SLAB_PANIC, NULL);
audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n");
}
int avc_get_hash_stats(char *page)
{
int i, chain_len, max_chain_len, slots_used;
struct avc_node *node;
struct hlist_head *head;
rcu_read_lock();
slots_used = 0;
max_chain_len = 0;
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
head = &avc_cache.slots[i];
if (!hlist_empty(head)) {
struct hlist_node *next;
slots_used++;
chain_len = 0;
hlist_for_each_entry_rcu(node, next, head, list)
chain_len++;
if (chain_len > max_chain_len)
max_chain_len = chain_len;
}
}
rcu_read_unlock();
return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
"longest chain: %d\n",
atomic_read(&avc_cache.active_nodes),
slots_used, AVC_CACHE_SLOTS, max_chain_len);
}
static void avc_node_free(struct rcu_head *rhead)
{
struct avc_node *node = container_of(rhead, struct avc_node, rhead);
kmem_cache_free(avc_node_cachep, node);
avc_cache_stats_incr(frees);
}
static void avc_node_delete(struct avc_node *node)
{
hlist_del_rcu(&node->list);
call_rcu(&node->rhead, avc_node_free);
atomic_dec(&avc_cache.active_nodes);
}
static void avc_node_kill(struct avc_node *node)
{
kmem_cache_free(avc_node_cachep, node);
avc_cache_stats_incr(frees);
atomic_dec(&avc_cache.active_nodes);
}
static void avc_node_replace(struct avc_node *new, struct avc_node *old)
{
hlist_replace_rcu(&old->list, &new->list);
call_rcu(&old->rhead, avc_node_free);
atomic_dec(&avc_cache.active_nodes);
}
static inline int avc_reclaim_node(void)
{
struct avc_node *node;
int hvalue, try, ecx;
unsigned long flags;
struct hlist_head *head;
struct hlist_node *next;
spinlock_t *lock;
for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
head = &avc_cache.slots[hvalue];
lock = &avc_cache.slots_lock[hvalue];
if (!spin_trylock_irqsave(lock, flags))
continue;
rcu_read_lock();
hlist_for_each_entry(node, next, head, list) {
avc_node_delete(node);
avc_cache_stats_incr(reclaims);
ecx++;
if (ecx >= AVC_CACHE_RECLAIM) {
rcu_read_unlock();
spin_unlock_irqrestore(lock, flags);
goto out;
}
}
rcu_read_unlock();
spin_unlock_irqrestore(lock, flags);
}
out:
return ecx;
}
static struct avc_node *avc_alloc_node(void)
{
struct avc_node *node;
node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC);
if (!node)
goto out;
INIT_RCU_HEAD(&node->rhead);
INIT_HLIST_NODE(&node->list);
avc_cache_stats_incr(allocations);
if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold)
avc_reclaim_node();
out:
return node;
}
static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
{
node->ae.ssid = ssid;
node->ae.tsid = tsid;
node->ae.tclass = tclass;
memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
}
static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
{
struct avc_node *node, *ret = NULL;
int hvalue;
struct hlist_head *head;
struct hlist_node *next;
hvalue = avc_hash(ssid, tsid, tclass);
head = &avc_cache.slots[hvalue];
hlist_for_each_entry_rcu(node, next, head, list) {
if (ssid == node->ae.ssid &&
tclass == node->ae.tclass &&
tsid == node->ae.tsid) {
ret = node;
break;
}
}
return ret;
}
/**
* avc_lookup - Look up an AVC entry.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
*
* Look up an AVC entry that is valid for the SID pair
* (@ssid, @tsid), interpreting the permissions
* based on @tclass. If a valid AVC entry exists,
* then this function returns the avc_node.
* Otherwise, this function returns NULL.
*/
static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
{
struct avc_node *node;
avc_cache_stats_incr(lookups);
node = avc_search_node(ssid, tsid, tclass);
if (node)
avc_cache_stats_incr(hits);
else
avc_cache_stats_incr(misses);
return node;
}
static int avc_latest_notif_update(int seqno, int is_insert)
{
int ret = 0;
static DEFINE_SPINLOCK(notif_lock);
unsigned long flag;
spin_lock_irqsave(&notif_lock, flag);
if (is_insert) {
if (seqno < avc_cache.latest_notif) {
printk(KERN_WARNING "SELinux: avc: seqno %d < latest_notif %d\n",
seqno, avc_cache.latest_notif);
ret = -EAGAIN;
}
} else {
if (seqno > avc_cache.latest_notif)
avc_cache.latest_notif = seqno;
}
spin_unlock_irqrestore(&notif_lock, flag);
return ret;
}
/**
* avc_insert - Insert an AVC entry.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @avd: resulting av decision
*
* Insert an AVC entry for the SID pair
* (@ssid, @tsid) and class @tclass.
* The access vectors and the sequence number are
* normally provided by the security server in
* response to a security_compute_av() call. If the
* sequence number @avd->seqno is not less than the latest
* revocation notification, then the function copies
* the access vectors into a cache entry and returns the
* inserted avc_node. Otherwise, this function returns NULL.
*/
static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
{
struct avc_node *pos, *node = NULL;
int hvalue;
unsigned long flag;
if (avc_latest_notif_update(avd->seqno, 1))
goto out;
node = avc_alloc_node();
if (node) {
struct hlist_head *head;
struct hlist_node *next;
spinlock_t *lock;
hvalue = avc_hash(ssid, tsid, tclass);
avc_node_populate(node, ssid, tsid, tclass, avd);
head = &avc_cache.slots[hvalue];
lock = &avc_cache.slots_lock[hvalue];
spin_lock_irqsave(lock, flag);
hlist_for_each_entry(pos, next, head, list) {
if (pos->ae.ssid == ssid &&
pos->ae.tsid == tsid &&
pos->ae.tclass == tclass) {
avc_node_replace(node, pos);
goto found;
}
}
hlist_add_head_rcu(&node->list, head);
found:
spin_unlock_irqrestore(lock, flag);
}
out:
return node;
}
/**
* avc_audit_pre_callback - SELinux-specific audit record prefix,
* called by the generic audit code
* @ab: the audit buffer
* @a: audit_data
*/
static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
audit_log_format(ab, "avc: %s ",
ad->selinux_audit_data.denied ? "denied" : "granted");
avc_dump_av(ab, ad->selinux_audit_data.tclass,
ad->selinux_audit_data.audited);
audit_log_format(ab, " for ");
}
/**
* avc_audit_post_callback - SELinux-specific audit record suffix,
* called by the generic audit code
* @ab: the audit buffer
* @a: audit_data
*/
static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
audit_log_format(ab, " ");
avc_dump_query(ab, ad->selinux_audit_data.ssid,
ad->selinux_audit_data.tsid,
ad->selinux_audit_data.tclass);
}
/**
* avc_audit - Audit the granting or denial of permissions.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @requested: requested permissions
* @avd: access vector decisions
* @result: result from avc_has_perm_noaudit
* @a: auxiliary audit data
*
* Audit the granting or denial of permissions in accordance
* with the policy. This function is typically called by
* avc_has_perm() after a permission check, but can also be
* called directly by callers who use avc_has_perm_noaudit()
* in order to separate the permission check from the auditing.
* For example, this separation is useful when the permission check must
* be performed under a lock, to allow the lock to be released
* before calling the auditing code.
*/
void avc_audit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct av_decision *avd, int result, struct common_audit_data *a)
{
struct common_audit_data stack_data;
u32 denied, audited;
denied = requested & ~avd->allowed;
if (denied) {
audited = denied;
if (!(audited & avd->auditdeny))
return;
} else if (result) {
audited = denied = requested;
} else {
audited = requested;
if (!(audited & avd->auditallow))
return;
}
if (!a) {
a = &stack_data;
memset(a, 0, sizeof(*a));
a->type = LSM_AUDIT_NO_AUDIT;
}
a->selinux_audit_data.tclass = tclass;
a->selinux_audit_data.requested = requested;
a->selinux_audit_data.ssid = ssid;
a->selinux_audit_data.tsid = tsid;
a->selinux_audit_data.audited = audited;
a->selinux_audit_data.denied = denied;
a->lsm_pre_audit = avc_audit_pre_callback;
a->lsm_post_audit = avc_audit_post_callback;
common_lsm_audit(a);
}
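/*
 * Usage sketch (illustrative, not part of the original file): the
 * check/audit split described above lets a caller that must hold a lock
 * across the permission check release that lock before auditing, e.g.
 * with a hypothetical caller-side lock:
 *
 *	struct av_decision avd;
 *	int rc;
 *
 *	spin_lock(&some_lock);
 *	rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
 *	spin_unlock(&some_lock);
 *	avc_audit(ssid, tsid, tclass, requested, &avd, rc, NULL);
 */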
/**
* avc_add_callback - Register a callback for security events.
* @callback: callback function
* @events: security events
* @ssid: source security identifier or %SECSID_WILD
* @tsid: target security identifier or %SECSID_WILD
* @tclass: target security class
* @perms: permissions
*
* Register a callback function for events in the set @events
* related to the SID pair (@ssid, @tsid) and
* the permissions @perms, interpreting
* @perms based on @tclass. Returns %0 on success or
* -%ENOMEM if insufficient memory exists to add the callback.
*/
int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
u16 tclass, u32 perms,
u32 *out_retained),
u32 events, u32 ssid, u32 tsid,
u16 tclass, u32 perms)
{
struct avc_callback_node *c;
int rc = 0;
c = kmalloc(sizeof(*c), GFP_ATOMIC);
if (!c) {
rc = -ENOMEM;
goto out;
}
c->callback = callback;
c->events = events;
c->ssid = ssid;
c->tsid = tsid;
c->perms = perms;
c->next = avc_callbacks;
avc_callbacks = c;
out:
return rc;
}
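/*
 * Registration sketch (illustrative only, not from the original source):
 * a subsystem interested in policy reloads could register a reset handler
 * with wildcard SIDs, for example:
 *
 *	err = avc_add_callback(my_reset_cb, AVC_CALLBACK_RESET,
 *			       SECSID_WILD, SECSID_WILD, 0, 0);
 *
 * where my_reset_cb is a hypothetical function matching the callback
 * prototype documented above.
 */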
static inline int avc_sidcmp(u32 x, u32 y)
{
return (x == y || x == SECSID_WILD || y == SECSID_WILD);
}
/**
* avc_update_node - Update an AVC entry
* @event: updating event
* @perms: permission mask bits
* @ssid, @tsid, @tclass: identifier of the AVC entry to update
* @seqno: sequence number when the decision was made
*
* If a valid AVC entry doesn't exist, this function returns -ENOENT.
* If the internal node allocation fails, this function returns -ENOMEM.
* Otherwise, this function updates the AVC entry; the original AVC-entry
* object will be released later by RCU.
*/
static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
u32 seqno)
{
int hvalue, rc = 0;
unsigned long flag;
struct avc_node *pos, *node, *orig = NULL;
struct hlist_head *head;
struct hlist_node *next;
spinlock_t *lock;
node = avc_alloc_node();
if (!node) {
rc = -ENOMEM;
goto out;
}
/* Lock the target slot */
hvalue = avc_hash(ssid, tsid, tclass);
head = &avc_cache.slots[hvalue];
lock = &avc_cache.slots_lock[hvalue];
spin_lock_irqsave(lock, flag);
hlist_for_each_entry(pos, next, head, list) {
if (ssid == pos->ae.ssid &&
tsid == pos->ae.tsid &&
tclass == pos->ae.tclass &&
seqno == pos->ae.avd.seqno){
orig = pos;
break;
}
}
if (!orig) {
rc = -ENOENT;
avc_node_kill(node);
goto out_unlock;
}
/*
* Copy and replace original node.
*/
avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
switch (event) {
case AVC_CALLBACK_GRANT:
node->ae.avd.allowed |= perms;
break;
case AVC_CALLBACK_TRY_REVOKE:
case AVC_CALLBACK_REVOKE:
node->ae.avd.allowed &= ~perms;
break;
case AVC_CALLBACK_AUDITALLOW_ENABLE:
node->ae.avd.auditallow |= perms;
break;
case AVC_CALLBACK_AUDITALLOW_DISABLE:
node->ae.avd.auditallow &= ~perms;
break;
case AVC_CALLBACK_AUDITDENY_ENABLE:
node->ae.avd.auditdeny |= perms;
break;
case AVC_CALLBACK_AUDITDENY_DISABLE:
node->ae.avd.auditdeny &= ~perms;
break;
}
avc_node_replace(node, orig);
out_unlock:
spin_unlock_irqrestore(lock, flag);
out:
return rc;
}
/**
* avc_flush - Flush the cache
*/
static void avc_flush(void)
{
struct hlist_head *head;
struct hlist_node *next;
struct avc_node *node;
spinlock_t *lock;
unsigned long flag;
int i;
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
head = &avc_cache.slots[i];
lock = &avc_cache.slots_lock[i];
spin_lock_irqsave(lock, flag);
/*
* With preemptable RCU, the outer spinlock does not
* prevent RCU grace periods from ending.
*/
rcu_read_lock();
hlist_for_each_entry(node, next, head, list)
avc_node_delete(node);
rcu_read_unlock();
spin_unlock_irqrestore(lock, flag);
}
}
/**
* avc_ss_reset - Flush the cache and revalidate migrated permissions.
* @seqno: policy sequence number
*/
int avc_ss_reset(u32 seqno)
{
struct avc_callback_node *c;
int rc = 0, tmprc;
avc_flush();
for (c = avc_callbacks; c; c = c->next) {
if (c->events & AVC_CALLBACK_RESET) {
tmprc = c->callback(AVC_CALLBACK_RESET,
0, 0, 0, 0, NULL);
/* save the first error encountered for the return
value and continue processing the callbacks */
if (!rc)
rc = tmprc;
}
}
avc_latest_notif_update(seqno, 0);
return rc;
}
/**
* avc_has_perm_noaudit - Check permissions but perform no auditing.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @requested: requested permissions, interpreted based on @tclass
* @flags: AVC_STRICT or 0
* @avd: access vector decisions
*
* Check the AVC to determine whether the @requested permissions are granted
* for the SID pair (@ssid, @tsid), interpreting the permissions
* based on @tclass, and call the security server on a cache miss to obtain
* a new decision and add it to the cache. Return a copy of the decisions
* in @avd. Return %0 if all @requested permissions are granted,
* -%EACCES if any permissions are denied, or another -errno upon
* other errors. This function is typically called by avc_has_perm(),
* but may also be called directly to separate permission checking from
* auditing, e.g. in cases where a lock must be held for the check but
* should be released for the auditing.
*/
int avc_has_perm_noaudit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
unsigned flags,
struct av_decision *in_avd)
{
struct avc_node *node;
struct av_decision avd_entry, *avd;
int rc = 0;
u32 denied;
BUG_ON(!requested);
rcu_read_lock();
node = avc_lookup(ssid, tsid, tclass);
if (!node) {
rcu_read_unlock();
if (in_avd)
avd = in_avd;
else
avd = &avd_entry;
rc = security_compute_av(ssid, tsid, tclass, requested, avd);
if (rc)
goto out;
rcu_read_lock();
node = avc_insert(ssid, tsid, tclass, avd);
} else {
if (in_avd)
memcpy(in_avd, &node->ae.avd, sizeof(*in_avd));
avd = &node->ae.avd;
}
denied = requested & ~(avd->allowed);
if (denied) {
if (flags & AVC_STRICT)
rc = -EACCES;
else if (!selinux_enforcing || (avd->flags & AVD_FLAGS_PERMISSIVE))
avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
tsid, tclass, avd->seqno);
else
rc = -EACCES;
}
rcu_read_unlock();
out:
return rc;
}
/**
* avc_has_perm - Check permissions and perform any appropriate auditing.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @requested: requested permissions, interpreted based on @tclass
* @auditdata: auxiliary audit data
*
* Check the AVC to determine whether the @requested permissions are granted
* for the SID pair (@ssid, @tsid), interpreting the permissions
* based on @tclass, and call the security server on a cache miss to obtain
* a new decision and add it to the cache. Audit the granting or denial of
* permissions in accordance with the policy. Return %0 if all @requested
* permissions are granted, -%EACCES if any permissions are denied, or
* another -errno upon other errors.
*/
int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
u32 requested, struct common_audit_data *auditdata)
{
struct av_decision avd;
int rc;
rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
return rc;
}
u32 avc_policy_seqno(void)
{
return avc_cache.latest_notif;
}
void avc_disable(void)
{
/*
* If you are looking at this because you have realized that we are
* not destroying the avc_node_cachep it might be easy to fix, but
* I don't know the memory barrier semantics well enough to know. It's
* possible that some other task dereferenced security_ops when
* it still pointed to selinux operations. If that is the case it's
* possible that it is about to use the avc and is about to need the
* avc_node_cachep. I know I could wrap the security.c security_ops call
* in an rcu_lock, but seriously, it's not worth it. Instead I just flush
* the cache and get that memory back.
*/
if (avc_node_cachep) {
avc_flush();
/* kmem_cache_destroy(avc_node_cachep); */
}
}
| gpl-2.0 |
dorimanx/Dorimanx-HD2-2.6.32.X | drivers/media/video/cx23885/cx23885-video.c | 465 | 40288 | /*
* Driver for the Conexant CX23885 PCIe bridge
*
* Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <asm/div64.h>
#include "cx23885.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
MODULE_DESCRIPTION("v4l2 driver module for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
/* ------------------------------------------------------------------ */
static unsigned int video_nr[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
static unsigned int vbi_nr[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
static unsigned int radio_nr[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(video_nr, int, NULL, 0444);
module_param_array(vbi_nr, int, NULL, 0444);
module_param_array(radio_nr, int, NULL, 0444);
MODULE_PARM_DESC(video_nr, "video device numbers");
MODULE_PARM_DESC(vbi_nr, "vbi device numbers");
MODULE_PARM_DESC(radio_nr, "radio device numbers");
static unsigned int video_debug;
module_param(video_debug, int, 0644);
MODULE_PARM_DESC(video_debug, "enable debug messages [video]");
static unsigned int irq_debug;
module_param(irq_debug, int, 0644);
MODULE_PARM_DESC(irq_debug, "enable debug messages [IRQ handler]");
static unsigned int vid_limit = 16;
module_param(vid_limit, int, 0644);
MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
#define dprintk(level, fmt, arg...)\
do { if (video_debug >= level)\
printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
} while (0)
/* ------------------------------------------------------------------- */
/* static data */
#define FORMAT_FLAGS_PACKED 0x01
static struct cx23885_fmt formats[] = {
{
.name = "8 bpp, gray",
.fourcc = V4L2_PIX_FMT_GREY,
.depth = 8,
.flags = FORMAT_FLAGS_PACKED,
}, {
.name = "15 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB555,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
}, {
.name = "15 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB555X,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
}, {
.name = "16 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
}, {
.name = "16 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB565X,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
}, {
.name = "24 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR24,
.depth = 24,
.flags = FORMAT_FLAGS_PACKED,
}, {
.name = "32 bpp RGB, le",
.fourcc = V4L2_PIX_FMT_BGR32,
.depth = 32,
.flags = FORMAT_FLAGS_PACKED,
}, {
.name = "32 bpp RGB, be",
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = 32,
.flags = FORMAT_FLAGS_PACKED,
}, {
.name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
}, {
.name = "4:2:2, packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
},
};
static struct cx23885_fmt *format_by_fourcc(unsigned int fourcc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(formats); i++)
if (formats[i].fourcc == fourcc)
return formats+i;
printk(KERN_ERR "%s(0x%08x) NOT FOUND\n", __func__, fourcc);
return NULL;
}
/* ------------------------------------------------------------------- */
static const struct v4l2_queryctrl no_ctl = {
.name = "42",
.flags = V4L2_CTRL_FLAG_DISABLED,
};
static struct cx23885_ctrl cx23885_ctls[] = {
/* --- video --- */
{
.v = {
.id = V4L2_CID_BRIGHTNESS,
.name = "Brightness",
.minimum = 0x00,
.maximum = 0xff,
.step = 1,
.default_value = 0x7f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.off = 128,
.reg = LUMA_CTRL,
.mask = 0x00ff,
.shift = 0,
}, {
.v = {
.id = V4L2_CID_CONTRAST,
.name = "Contrast",
.minimum = 0,
.maximum = 0xff,
.step = 1,
.default_value = 0x3f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.off = 0,
.reg = LUMA_CTRL,
.mask = 0xff00,
.shift = 8,
}, {
.v = {
.id = V4L2_CID_HUE,
.name = "Hue",
.minimum = 0,
.maximum = 0xff,
.step = 1,
.default_value = 0x7f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.off = 128,
.reg = CHROMA_CTRL,
.mask = 0xff0000,
.shift = 16,
}, {
/* strictly, this describes only U saturation.
* V saturation is handled specially through code.
*/
.v = {
.id = V4L2_CID_SATURATION,
.name = "Saturation",
.minimum = 0,
.maximum = 0xff,
.step = 1,
.default_value = 0x7f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.off = 0,
.reg = CHROMA_CTRL,
.mask = 0x00ff,
.shift = 0,
}, {
/* --- audio --- */
.v = {
.id = V4L2_CID_AUDIO_MUTE,
.name = "Mute",
.minimum = 0,
.maximum = 1,
.default_value = 1,
.type = V4L2_CTRL_TYPE_BOOLEAN,
},
.reg = PATH1_CTL1,
.mask = (0x1f << 24),
.shift = 24,
}, {
.v = {
.id = V4L2_CID_AUDIO_VOLUME,
.name = "Volume",
.minimum = 0,
.maximum = 0x3f,
.step = 1,
.default_value = 0x3f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.reg = PATH1_VOL_CTL,
.mask = 0xff,
.shift = 0,
}
};
static const int CX23885_CTLS = ARRAY_SIZE(cx23885_ctls);
/* Must be sorted from low to high control ID! */
static const u32 cx23885_user_ctrls[] = {
V4L2_CID_USER_CLASS,
V4L2_CID_BRIGHTNESS,
V4L2_CID_CONTRAST,
V4L2_CID_SATURATION,
V4L2_CID_HUE,
V4L2_CID_AUDIO_VOLUME,
V4L2_CID_AUDIO_MUTE,
0
};
static const u32 *ctrl_classes[] = {
cx23885_user_ctrls,
NULL
};
static void cx23885_video_wakeup(struct cx23885_dev *dev,
struct cx23885_dmaqueue *q, u32 count)
{
struct cx23885_buffer *buf;
int bc;
for (bc = 0;; bc++) {
if (list_empty(&q->active))
break;
buf = list_entry(q->active.next,
struct cx23885_buffer, vb.queue);
/* count comes from the hw and is 16 bits wide --
* this trick handles wrap-arounds correctly for
* up to 32767 buffers in flight... */
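/* worked example (added for clarity): if the hw counter has wrapped to
 * count = 2 while buf->count is still 65534, the u32 difference is
 * 0xFFFF0004, whose low 16 bits give (s16)4 >= 0, so the buffer is
 * treated as completed; with the same count a buffer stamped 5 yields
 * (s16)-3 < 0 and stops the scan. */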
if ((s16) (count - buf->count) < 0)
break;
do_gettimeofday(&buf->vb.ts);
dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
count, buf->count);
buf->vb.state = VIDEOBUF_DONE;
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
}
if (list_empty(&q->active))
del_timer(&q->timeout);
else
mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
if (bc != 1)
printk(KERN_ERR "%s: %d buffers handled (should be 1)\n",
__func__, bc);
}
static int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm)
{
dprintk(1, "%s(norm = 0x%08x) name: [%s]\n",
__func__,
(unsigned int)norm,
v4l2_norm_to_name(norm));
dev->tvnorm = norm;
call_all(dev, core, s_std, norm);
return 0;
}
static struct video_device *cx23885_vdev_init(struct cx23885_dev *dev,
struct pci_dev *pci,
struct video_device *template,
char *type)
{
struct video_device *vfd;
dprintk(1, "%s()\n", __func__);
vfd = video_device_alloc();
if (NULL == vfd)
return NULL;
*vfd = *template;
vfd->minor = -1;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
dev->name, type, cx23885_boards[dev->board].name);
return vfd;
}
static int cx23885_ctrl_query(struct v4l2_queryctrl *qctrl)
{
int i;
if (qctrl->id < V4L2_CID_BASE ||
qctrl->id >= V4L2_CID_LASTP1)
return -EINVAL;
for (i = 0; i < CX23885_CTLS; i++)
if (cx23885_ctls[i].v.id == qctrl->id)
break;
if (i == CX23885_CTLS) {
*qctrl = no_ctl;
return 0;
}
*qctrl = cx23885_ctls[i].v;
return 0;
}
/* ------------------------------------------------------------------- */
/* resource management */
static int res_get(struct cx23885_dev *dev, struct cx23885_fh *fh,
unsigned int bit)
{
dprintk(1, "%s()\n", __func__);
if (fh->resources & bit)
/* have it already allocated */
return 1;
/* is it free? */
mutex_lock(&dev->lock);
if (dev->resources & bit) {
/* no, someone else uses it */
mutex_unlock(&dev->lock);
return 0;
}
/* it's free, grab it */
fh->resources |= bit;
dev->resources |= bit;
dprintk(1, "res: get %d\n", bit);
mutex_unlock(&dev->lock);
return 1;
}
static int res_check(struct cx23885_fh *fh, unsigned int bit)
{
return fh->resources & bit;
}
static int res_locked(struct cx23885_dev *dev, unsigned int bit)
{
return dev->resources & bit;
}
static void res_free(struct cx23885_dev *dev, struct cx23885_fh *fh,
unsigned int bits)
{
BUG_ON((fh->resources & bits) != bits);
dprintk(1, "%s()\n", __func__);
mutex_lock(&dev->lock);
fh->resources &= ~bits;
dev->resources &= ~bits;
dprintk(1, "res: put %d\n", bits);
mutex_unlock(&dev->lock);
}
static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
{
dprintk(1, "%s() video_mux: %d [vmux=%d, gpio=0x%x,0x%x,0x%x,0x%x]\n",
__func__,
input, INPUT(input)->vmux,
INPUT(input)->gpio0, INPUT(input)->gpio1,
INPUT(input)->gpio2, INPUT(input)->gpio3);
dev->input = input;
/* Tell the internal A/V decoder */
v4l2_subdev_call(dev->sd_cx25840, video, s_routing,
INPUT(input)->vmux, 0, 0);
return 0;
}
/* ------------------------------------------------------------------ */
static int cx23885_set_scale(struct cx23885_dev *dev, unsigned int width,
unsigned int height, enum v4l2_field field)
{
dprintk(1, "%s()\n", __func__);
return 0;
}
static int cx23885_start_video_dma(struct cx23885_dev *dev,
struct cx23885_dmaqueue *q,
struct cx23885_buffer *buf)
{
dprintk(1, "%s()\n", __func__);
/* setup fifo + format */
cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
buf->bpl, buf->risc.dma);
cx23885_set_scale(dev, buf->vb.width, buf->vb.height, buf->vb.field);
/* reset counter */
cx_write(VID_A_GPCNT_CTL, 3);
q->count = 1;
/* enable irq */
cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | 0x01);
cx_set(VID_A_INT_MSK, 0x000011);
/* start dma */
cx_set(DEV_CNTRL2, (1<<5));
cx_set(VID_A_DMA_CTL, 0x11); /* FIFO and RISC enable */
return 0;
}
static int cx23885_restart_video_queue(struct cx23885_dev *dev,
struct cx23885_dmaqueue *q)
{
struct cx23885_buffer *buf, *prev;
struct list_head *item;
dprintk(1, "%s()\n", __func__);
if (!list_empty(&q->active)) {
buf = list_entry(q->active.next, struct cx23885_buffer,
vb.queue);
dprintk(2, "restart_queue [%p/%d]: restart dma\n",
buf, buf->vb.i);
cx23885_start_video_dma(dev, q, buf);
list_for_each(item, &q->active) {
buf = list_entry(item, struct cx23885_buffer,
vb.queue);
buf->count = q->count++;
}
mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
return 0;
}
prev = NULL;
for (;;) {
if (list_empty(&q->queued))
return 0;
buf = list_entry(q->queued.next, struct cx23885_buffer,
vb.queue);
if (NULL == prev) {
list_move_tail(&buf->vb.queue, &q->active);
cx23885_start_video_dma(dev, q, buf);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
dprintk(2, "[%p/%d] restart_queue - first active\n",
buf, buf->vb.i);
} else if (prev->vb.width == buf->vb.width &&
prev->vb.height == buf->vb.height &&
prev->fmt == buf->fmt) {
list_move_tail(&buf->vb.queue, &q->active);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
prev->risc.jmp[2] = cpu_to_le32(0); /* Bits 63 - 32 */
dprintk(2, "[%p/%d] restart_queue - move to active\n",
buf, buf->vb.i);
} else {
return 0;
}
prev = buf;
}
}
static int buffer_setup(struct videobuf_queue *q, unsigned int *count,
unsigned int *size)
{
struct cx23885_fh *fh = q->priv_data;
*size = fh->fmt->depth*fh->width*fh->height >> 3;
if (0 == *count)
*count = 32;
while (*size * *count > vid_limit * 1024 * 1024)
(*count)--;
return 0;
}
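/*
 * Sizing sketch (added for illustration): a 720x576 YUYV capture uses
 * 16 bpp, so each buffer is 720 * 576 * 16 / 8 = 829440 bytes; with the
 * default vid_limit of 16 MiB the loop above trims a request for 32
 * buffers down to 20 (20 * 829440 fits, 21 * 829440 does not).
 */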
static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
enum v4l2_field field)
{
struct cx23885_fh *fh = q->priv_data;
struct cx23885_dev *dev = fh->dev;
struct cx23885_buffer *buf =
container_of(vb, struct cx23885_buffer, vb);
int rc, init_buffer = 0;
u32 line0_offset, line1_offset;
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
BUG_ON(NULL == fh->fmt);
if (fh->width < 48 || fh->width > norm_maxw(dev->tvnorm) ||
fh->height < 32 || fh->height > norm_maxh(dev->tvnorm))
return -EINVAL;
buf->vb.size = (fh->width * fh->height * fh->fmt->depth) >> 3;
if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
return -EINVAL;
if (buf->fmt != fh->fmt ||
buf->vb.width != fh->width ||
buf->vb.height != fh->height ||
buf->vb.field != field) {
buf->fmt = fh->fmt;
buf->vb.width = fh->width;
buf->vb.height = fh->height;
buf->vb.field = field;
init_buffer = 1;
}
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
init_buffer = 1;
rc = videobuf_iolock(q, &buf->vb, NULL);
if (0 != rc)
goto fail;
}
if (init_buffer) {
buf->bpl = buf->vb.width * buf->fmt->depth >> 3;
switch (buf->vb.field) {
case V4L2_FIELD_TOP:
cx23885_risc_buffer(dev->pci, &buf->risc,
dma->sglist, 0, UNSET,
buf->bpl, 0, buf->vb.height);
break;
case V4L2_FIELD_BOTTOM:
cx23885_risc_buffer(dev->pci, &buf->risc,
dma->sglist, UNSET, 0,
buf->bpl, 0, buf->vb.height);
break;
case V4L2_FIELD_INTERLACED:
if (dev->tvnorm & V4L2_STD_NTSC) {
/* cx25840 transmits NTSC bottom field first */
dprintk(1, "%s() Creating NTSC risc\n",
__func__);
line0_offset = buf->bpl;
line1_offset = 0;
} else {
/* All other formats are top field first */
dprintk(1, "%s() Creating PAL/SECAM risc\n",
__func__);
line0_offset = 0;
line1_offset = buf->bpl;
}
cx23885_risc_buffer(dev->pci, &buf->risc,
dma->sglist, line0_offset,
line1_offset,
buf->bpl, buf->bpl,
buf->vb.height >> 1);
break;
case V4L2_FIELD_SEQ_TB:
cx23885_risc_buffer(dev->pci, &buf->risc,
dma->sglist,
0, buf->bpl * (buf->vb.height >> 1),
buf->bpl, 0,
buf->vb.height >> 1);
break;
case V4L2_FIELD_SEQ_BT:
cx23885_risc_buffer(dev->pci, &buf->risc,
dma->sglist,
buf->bpl * (buf->vb.height >> 1), 0,
buf->bpl, 0,
buf->vb.height >> 1);
break;
default:
BUG();
}
}
dprintk(2, "[%p/%d] buffer_prep - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
buf, buf->vb.i,
fh->width, fh->height, fh->fmt->depth, fh->fmt->name,
(unsigned long)buf->risc.dma);
buf->vb.state = VIDEOBUF_PREPARED;
return 0;
fail:
cx23885_free_buffer(q, buf);
return rc;
}
static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
struct cx23885_buffer *buf = container_of(vb,
struct cx23885_buffer, vb);
struct cx23885_buffer *prev;
struct cx23885_fh *fh = vq->priv_data;
struct cx23885_dev *dev = fh->dev;
struct cx23885_dmaqueue *q = &dev->vidq;
/* add jump to stopper */
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
if (!list_empty(&q->queued)) {
list_add_tail(&buf->vb.queue, &q->queued);
buf->vb.state = VIDEOBUF_QUEUED;
dprintk(2, "[%p/%d] buffer_queue - append to queued\n",
buf, buf->vb.i);
} else if (list_empty(&q->active)) {
list_add_tail(&buf->vb.queue, &q->active);
cx23885_start_video_dma(dev, q, buf);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
dprintk(2, "[%p/%d] buffer_queue - first active\n",
buf, buf->vb.i);
} else {
prev = list_entry(q->active.prev, struct cx23885_buffer,
vb.queue);
if (prev->vb.width == buf->vb.width &&
prev->vb.height == buf->vb.height &&
prev->fmt == buf->fmt) {
list_add_tail(&buf->vb.queue, &q->active);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
/* 64 bit bits 63-32 */
prev->risc.jmp[2] = cpu_to_le32(0);
dprintk(2, "[%p/%d] buffer_queue - append to active\n",
buf, buf->vb.i);
} else {
list_add_tail(&buf->vb.queue, &q->queued);
buf->vb.state = VIDEOBUF_QUEUED;
dprintk(2, "[%p/%d] buffer_queue - first queued\n",
buf, buf->vb.i);
}
}
}
static void buffer_release(struct videobuf_queue *q,
struct videobuf_buffer *vb)
{
struct cx23885_buffer *buf = container_of(vb,
struct cx23885_buffer, vb);
cx23885_free_buffer(q, buf);
}
static struct videobuf_queue_ops cx23885_video_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
.buf_release = buffer_release,
};
static struct videobuf_queue *get_queue(struct cx23885_fh *fh)
{
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
return &fh->vidq;
case V4L2_BUF_TYPE_VBI_CAPTURE:
return &fh->vbiq;
default:
BUG();
return NULL;
}
}
static int get_resource(struct cx23885_fh *fh)
{
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
return RESOURCE_VIDEO;
case V4L2_BUF_TYPE_VBI_CAPTURE:
return RESOURCE_VBI;
default:
BUG();
return 0;
}
}
static int video_open(struct file *file)
{
int minor = video_devdata(file)->minor;
struct cx23885_dev *h, *dev = NULL;
struct cx23885_fh *fh;
struct list_head *list;
enum v4l2_buf_type type = 0;
int radio = 0;
lock_kernel();
list_for_each(list, &cx23885_devlist) {
h = list_entry(list, struct cx23885_dev, devlist);
if (h->video_dev &&
h->video_dev->minor == minor) {
dev = h;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
}
if (h->vbi_dev &&
h->vbi_dev->minor == minor) {
dev = h;
type = V4L2_BUF_TYPE_VBI_CAPTURE;
}
if (h->radio_dev &&
h->radio_dev->minor == minor) {
radio = 1;
dev = h;
}
}
if (NULL == dev) {
unlock_kernel();
return -ENODEV;
}
dprintk(1, "open minor=%d radio=%d type=%s\n",
minor, radio, v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
if (NULL == fh) {
unlock_kernel();
return -ENOMEM;
}
file->private_data = fh;
fh->dev = dev;
fh->radio = radio;
fh->type = type;
fh->width = 320;
fh->height = 240;
fh->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
videobuf_queue_sg_init(&fh->vidq, &cx23885_video_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx23885_buffer),
fh);
dprintk(1, "post videobuf_queue_init()\n");
unlock_kernel();
return 0;
}
static ssize_t video_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
{
struct cx23885_fh *fh = file->private_data;
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
if (res_locked(fh->dev, RESOURCE_VIDEO))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
file->f_flags & O_NONBLOCK);
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (!res_get(fh->dev, fh, RESOURCE_VBI))
return -EBUSY;
return videobuf_read_stream(&fh->vbiq, data, count, ppos, 1,
file->f_flags & O_NONBLOCK);
default:
BUG();
return 0;
}
}
static unsigned int video_poll(struct file *file,
struct poll_table_struct *wait)
{
struct cx23885_fh *fh = file->private_data;
struct cx23885_buffer *buf;
unsigned int rc = POLLERR;
if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
if (!res_get(fh->dev, fh, RESOURCE_VBI))
return POLLERR;
return videobuf_poll_stream(file, &fh->vbiq, wait);
}
mutex_lock(&fh->vidq.vb_lock);
if (res_check(fh, RESOURCE_VIDEO)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
goto done;
buf = list_entry(fh->vidq.stream.next,
struct cx23885_buffer, vb.stream);
} else {
/* read() capture */
buf = (struct cx23885_buffer *)fh->vidq.read_buf;
if (NULL == buf)
goto done;
}
poll_wait(file, &buf->vb.done, wait);
if (buf->vb.state == VIDEOBUF_DONE ||
buf->vb.state == VIDEOBUF_ERROR)
rc = POLLIN|POLLRDNORM;
else
rc = 0;
done:
mutex_unlock(&fh->vidq.vb_lock);
return rc;
}
static int video_release(struct file *file)
{
struct cx23885_fh *fh = file->private_data;
struct cx23885_dev *dev = fh->dev;
/* turn off overlay */
if (res_check(fh, RESOURCE_OVERLAY)) {
/* FIXME */
res_free(dev, fh, RESOURCE_OVERLAY);
}
/* stop video capture */
if (res_check(fh, RESOURCE_VIDEO)) {
videobuf_queue_cancel(&fh->vidq);
res_free(dev, fh, RESOURCE_VIDEO);
}
if (fh->vidq.read_buf) {
buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
/* stop vbi capture */
if (res_check(fh, RESOURCE_VBI)) {
if (fh->vbiq.streaming)
videobuf_streamoff(&fh->vbiq);
if (fh->vbiq.reading)
videobuf_read_stop(&fh->vbiq);
res_free(dev, fh, RESOURCE_VBI);
}
videobuf_mmap_free(&fh->vidq);
file->private_data = NULL;
kfree(fh);
/* We are not putting the tuner to sleep here on exit, because
* we want to use the mpeg encoder in another session to capture
* tuner video. Closing this will result in no video to the encoder.
*/
return 0;
}
static int video_mmap(struct file *file, struct vm_area_struct *vma)
{
struct cx23885_fh *fh = file->private_data;
return videobuf_mmap_mapper(get_queue(fh), vma);
}
/* ------------------------------------------------------------------ */
/* VIDEO CTRL IOCTLS */
static int cx23885_get_control(struct cx23885_dev *dev,
struct v4l2_control *ctl)
{
dprintk(1, "%s() calling cx25840(VIDIOC_G_CTRL)\n", __func__);
call_all(dev, core, g_ctrl, ctl);
return 0;
}
static int cx23885_set_control(struct cx23885_dev *dev,
struct v4l2_control *ctl)
{
dprintk(1, "%s() calling cx25840(VIDIOC_S_CTRL)"
" (disabled - no action)\n", __func__);
return 0;
}
static void init_controls(struct cx23885_dev *dev)
{
struct v4l2_control ctrl;
int i;
for (i = 0; i < CX23885_CTLS; i++) {
ctrl.id = cx23885_ctls[i].v.id;
ctrl.value = cx23885_ctls[i].v.default_value;
cx23885_set_control(dev, &ctrl);
}
}
/* ------------------------------------------------------------------ */
/* VIDEO IOCTLS */
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx23885_fh *fh = priv;
f->fmt.pix.width = fh->width;
f->fmt.pix.height = fh->height;
f->fmt.pix.field = fh->vidq.field;
f->fmt.pix.pixelformat = fh->fmt->fourcc;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fh->fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
return 0;
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
struct cx23885_fmt *fmt;
enum v4l2_field field;
unsigned int maxw, maxh;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (NULL == fmt)
return -EINVAL;
field = f->fmt.pix.field;
maxw = norm_maxw(dev->tvnorm);
maxh = norm_maxh(dev->tvnorm);
if (V4L2_FIELD_ANY == field) {
field = (f->fmt.pix.height > maxh/2)
? V4L2_FIELD_INTERLACED
: V4L2_FIELD_BOTTOM;
}
switch (field) {
case V4L2_FIELD_TOP:
case V4L2_FIELD_BOTTOM:
maxh = maxh / 2;
break;
case V4L2_FIELD_INTERLACED:
break;
default:
return -EINVAL;
}
f->fmt.pix.field = field;
v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
&f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
return 0;
}
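/*
 * Behavioural sketch (illustrative; assumes norm_maxh() reports 480 for
 * NTSC): a 720x480 request with V4L2_FIELD_ANY exceeds maxh/2 = 240
 * lines and is promoted to V4L2_FIELD_INTERLACED, while a 720x240
 * request falls back to V4L2_FIELD_BOTTOM and maxh is halved to 240
 * before v4l_bound_align_image() clamps the dimensions.
 */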
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx23885_fh *fh = priv;
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
int err;
dprintk(2, "%s()\n", __func__);
err = vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
fh->width = f->fmt.pix.width;
fh->height = f->fmt.pix.height;
fh->vidq.field = f->fmt.pix.field;
dprintk(2, "%s() width=%d height=%d field=%d\n", __func__,
fh->width, fh->height, fh->vidq.field);
call_all(dev, video, s_fmt, f);
return 0;
}
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
strcpy(cap->driver, "cx23885");
strlcpy(cap->card, cx23885_boards[dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci));
cap->version = CX23885_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING |
V4L2_CAP_VBI_CAPTURE;
if (UNSET != dev->tuner_type)
cap->capabilities |= V4L2_CAP_TUNER;
return 0;
}
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
if (unlikely(f->index >= ARRAY_SIZE(formats)))
return -EINVAL;
strlcpy(f->description, formats[f->index].name,
sizeof(f->description));
f->pixelformat = formats[f->index].fourcc;
return 0;
}
#ifdef CONFIG_VIDEO_V4L1_COMPAT
static int vidiocgmbuf(struct file *file, void *priv,
struct video_mbuf *mbuf)
{
struct cx23885_fh *fh = priv;
struct videobuf_queue *q;
struct v4l2_requestbuffers req;
unsigned int i;
int err;
q = get_queue(fh);
memset(&req, 0, sizeof(req));
req.type = q->type;
req.count = 8;
req.memory = V4L2_MEMORY_MMAP;
err = videobuf_reqbufs(q, &req);
if (err < 0)
return err;
mbuf->frames = req.count;
mbuf->size = 0;
for (i = 0; i < mbuf->frames; i++) {
mbuf->offsets[i] = q->bufs[i]->boff;
mbuf->size += q->bufs[i]->bsize;
}
return 0;
}
#endif
static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
struct cx23885_fh *fh = priv;
return videobuf_reqbufs(get_queue(fh), p);
}
static int vidioc_querybuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
struct cx23885_fh *fh = priv;
return videobuf_querybuf(get_queue(fh), p);
}
static int vidioc_qbuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
struct cx23885_fh *fh = priv;
return videobuf_qbuf(get_queue(fh), p);
}
static int vidioc_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
struct cx23885_fh *fh = priv;
return videobuf_dqbuf(get_queue(fh), p,
file->f_flags & O_NONBLOCK);
}
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type i)
{
struct cx23885_fh *fh = priv;
struct cx23885_dev *dev = fh->dev;
dprintk(1, "%s()\n", __func__);
if (unlikely(fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE))
return -EINVAL;
if (unlikely(i != fh->type))
return -EINVAL;
if (unlikely(!res_get(dev, fh, get_resource(fh))))
return -EBUSY;
return videobuf_streamon(get_queue(fh));
}
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct cx23885_fh *fh = priv;
struct cx23885_dev *dev = fh->dev;
int err, res;
dprintk(1, "%s()\n", __func__);
if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (i != fh->type)
return -EINVAL;
res = get_resource(fh);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
res_free(dev, fh, res);
return 0;
}
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *tvnorms)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
dprintk(1, "%s()\n", __func__);
mutex_lock(&dev->lock);
cx23885_set_tvnorm(dev, *tvnorms);
mutex_unlock(&dev->lock);
return 0;
}
static int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i)
{
static const char *iname[] = {
[CX23885_VMUX_COMPOSITE1] = "Composite1",
[CX23885_VMUX_COMPOSITE2] = "Composite2",
[CX23885_VMUX_COMPOSITE3] = "Composite3",
[CX23885_VMUX_COMPOSITE4] = "Composite4",
[CX23885_VMUX_SVIDEO] = "S-Video",
[CX23885_VMUX_TELEVISION] = "Television",
[CX23885_VMUX_CABLE] = "Cable TV",
[CX23885_VMUX_DVB] = "DVB",
[CX23885_VMUX_DEBUG] = "for debug only",
};
unsigned int n;
dprintk(1, "%s()\n", __func__);
n = i->index;
if (n >= 4)
return -EINVAL;
if (0 == INPUT(n)->type)
return -EINVAL;
memset(i, 0, sizeof(*i));
i->index = n;
i->type = V4L2_INPUT_TYPE_CAMERA;
strcpy(i->name, iname[INPUT(n)->type]);
if ((CX23885_VMUX_TELEVISION == INPUT(n)->type) ||
(CX23885_VMUX_CABLE == INPUT(n)->type))
i->type = V4L2_INPUT_TYPE_TUNER;
i->std = CX23885_NORMS;
return 0;
}
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
dprintk(1, "%s()\n", __func__);
return cx23885_enum_input(dev, i);
}
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
*i = dev->input;
dprintk(1, "%s() returns %d\n", __func__, *i);
return 0;
}
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
dprintk(1, "%s(%d)\n", __func__, i);
if (i >= 4) {
dprintk(1, "%s() -EINVAL\n", __func__);
return -EINVAL;
}
mutex_lock(&dev->lock);
cx23885_video_mux(dev, i);
mutex_unlock(&dev->lock);
return 0;
}
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qctrl)
{
qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
if (unlikely(qctrl->id == 0))
return -EINVAL;
return cx23885_ctrl_query(qctrl);
}
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctl)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
return cx23885_get_control(dev, ctl);
}
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctl)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
return cx23885_set_control(dev, ctl);
}
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
if (unlikely(UNSET == dev->tuner_type))
return -EINVAL;
if (0 != t->index)
return -EINVAL;
strcpy(t->name, "Television");
t->type = V4L2_TUNER_ANALOG_TV;
t->capability = V4L2_TUNER_CAP_NORM;
t->rangehigh = 0xffffffffUL;
t->signal = 0xffff; /* LOCKED */
return 0;
}
static int vidioc_s_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
if (UNSET == dev->tuner_type)
return -EINVAL;
if (0 != t->index)
return -EINVAL;
return 0;
}
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
struct cx23885_fh *fh = priv;
struct cx23885_dev *dev = fh->dev;
if (unlikely(UNSET == dev->tuner_type))
return -EINVAL;
/* f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; */
f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
f->frequency = dev->freq;
call_all(dev, tuner, g_frequency, f);
return 0;
}
static int cx23885_set_freq(struct cx23885_dev *dev, struct v4l2_frequency *f)
{
if (unlikely(UNSET == dev->tuner_type))
return -EINVAL;
if (unlikely(f->tuner != 0))
return -EINVAL;
mutex_lock(&dev->lock);
dev->freq = f->frequency;
call_all(dev, tuner, s_frequency, f);
/* When changing channels it is required to reset TVAUDIO */
msleep(10);
mutex_unlock(&dev->lock);
return 0;
}
static int vidioc_s_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
struct cx23885_fh *fh = priv;
struct cx23885_dev *dev = fh->dev;
if (unlikely(0 == fh->radio && f->type != V4L2_TUNER_ANALOG_TV))
return -EINVAL;
if (unlikely(1 == fh->radio && f->type != V4L2_TUNER_RADIO))
return -EINVAL;
return
cx23885_set_freq(dev, f);
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int vidioc_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)fh)->dev;
if (!v4l2_chip_match_host(&reg->match))
return -EINVAL;
call_all(dev, core, g_register, reg);
return 0;
}
static int vidioc_s_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
struct cx23885_dev *dev = ((struct cx23885_fh *)fh)->dev;
if (!v4l2_chip_match_host(&reg->match))
return -EINVAL;
call_all(dev, core, s_register, reg);
return 0;
}
#endif
/* ----------------------------------------------------------- */
static void cx23885_vid_timeout(unsigned long data)
{
struct cx23885_dev *dev = (struct cx23885_dev *)data;
struct cx23885_dmaqueue *q = &dev->vidq;
struct cx23885_buffer *buf;
unsigned long flags;
cx23885_sram_channel_dump(dev, &dev->sram_channels[SRAM_CH01]);
cx_clear(VID_A_DMA_CTL, 0x11);
spin_lock_irqsave(&dev->slock, flags);
while (!list_empty(&q->active)) {
buf = list_entry(q->active.next,
struct cx23885_buffer, vb.queue);
list_del(&buf->vb.queue);
buf->vb.state = VIDEOBUF_ERROR;
wake_up(&buf->vb.done);
printk(KERN_ERR "%s/0: [%p/%d] timeout - dma=0x%08lx\n",
dev->name, buf, buf->vb.i,
(unsigned long)buf->risc.dma);
}
cx23885_restart_video_queue(dev, q);
spin_unlock_irqrestore(&dev->slock, flags);
}
int cx23885_video_irq(struct cx23885_dev *dev, u32 status)
{
u32 mask, count;
int handled = 0;
mask = cx_read(VID_A_INT_MSK);
if (0 == (status & mask))
return handled;
cx_write(VID_A_INT_STAT, status);
dprintk(2, "%s() status = 0x%08x\n", __func__, status);
/* risc op code error */
if (status & (1 << 16)) {
printk(KERN_WARNING "%s/0: video risc op code error\n",
dev->name);
cx_clear(VID_A_DMA_CTL, 0x11);
cx23885_sram_channel_dump(dev, &dev->sram_channels[SRAM_CH01]);
}
/* risc1 y */
if (status & 0x01) {
spin_lock(&dev->slock);
count = cx_read(VID_A_GPCNT);
cx23885_video_wakeup(dev, &dev->vidq, count);
spin_unlock(&dev->slock);
handled++;
}
/* risc2 y */
if (status & 0x10) {
dprintk(2, "stopper video\n");
spin_lock(&dev->slock);
cx23885_restart_video_queue(dev, &dev->vidq);
spin_unlock(&dev->slock);
handled++;
}
return handled;
}
/* ----------------------------------------------------------- */
/* exported stuff */
static const struct v4l2_file_operations video_fops = {
.owner = THIS_MODULE,
.open = video_open,
.release = video_release,
.read = video_read,
.poll = video_poll,
.mmap = video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
.vidioc_g_fmt_vbi_cap = cx23885_vbi_fmt,
.vidioc_try_fmt_vbi_cap = cx23885_vbi_fmt,
.vidioc_s_fmt_vbi_cap = cx23885_vbi_fmt,
.vidioc_reqbufs = vidioc_reqbufs,
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_s_std = vidioc_s_std,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
.vidioc_queryctrl = vidioc_queryctrl,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
.vidiocgmbuf = vidiocgmbuf,
#endif
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
#endif
};
static struct video_device cx23885_vbi_template;
static struct video_device cx23885_video_template = {
.name = "cx23885-video",
.fops = &video_fops,
.minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX23885_NORMS,
.current_norm = V4L2_STD_NTSC_M,
};
static const struct v4l2_file_operations radio_fops = {
.owner = THIS_MODULE,
.open = video_open,
.release = video_release,
.ioctl = video_ioctl2,
};
void cx23885_video_unregister(struct cx23885_dev *dev)
{
dprintk(1, "%s()\n", __func__);
cx_clear(PCI_INT_MSK, 1);
if (dev->video_dev) {
if (-1 != dev->video_dev->minor)
video_unregister_device(dev->video_dev);
else
video_device_release(dev->video_dev);
dev->video_dev = NULL;
btcx_riscmem_free(dev->pci, &dev->vidq.stopper);
}
}
int cx23885_video_register(struct cx23885_dev *dev)
{
int err;
dprintk(1, "%s()\n", __func__);
spin_lock_init(&dev->slock);
/* Initialize VBI template */
memcpy(&cx23885_vbi_template, &cx23885_video_template,
sizeof(cx23885_vbi_template));
strcpy(cx23885_vbi_template.name, "cx23885-vbi");
dev->tvnorm = cx23885_video_template.current_norm;
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
INIT_LIST_HEAD(&dev->vidq.queued);
dev->vidq.timeout.function = cx23885_vid_timeout;
dev->vidq.timeout.data = (unsigned long)dev;
init_timer(&dev->vidq.timeout);
cx23885_risc_stopper(dev->pci, &dev->vidq.stopper,
VID_A_DMA_CTL, 0x11, 0x00);
/* Don't enable VBI yet */
cx_set(PCI_INT_MSK, 1);
if (TUNER_ABSENT != dev->tuner_type) {
struct v4l2_subdev *sd = NULL;
if (dev->tuner_addr)
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[1].i2c_adap,
"tuner", "tuner", dev->tuner_addr, NULL);
else
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[1].i2c_adap,
"tuner", "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_TV));
if (sd) {
struct tuner_setup tun_setup;
tun_setup.mode_mask = T_ANALOG_TV;
tun_setup.type = dev->tuner_type;
tun_setup.addr = v4l2_i2c_subdev_addr(sd);
v4l2_subdev_call(sd, tuner, s_type_addr, &tun_setup);
}
}
/* register v4l devices */
dev->video_dev = cx23885_vdev_init(dev, dev->pci,
&cx23885_video_template, "video");
err = video_register_device(dev->video_dev, VFL_TYPE_GRABBER,
video_nr[dev->nr]);
if (err < 0) {
printk(KERN_INFO "%s: can't register video device\n",
dev->name);
goto fail_unreg;
}
printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n",
dev->name, dev->video_dev->num);
/* initial device configuration */
mutex_lock(&dev->lock);
cx23885_set_tvnorm(dev, dev->tvnorm);
init_controls(dev);
cx23885_video_mux(dev, 0);
mutex_unlock(&dev->lock);
return 0;
fail_unreg:
cx23885_video_unregister(dev);
return err;
}
| gpl-2.0 |
shubhangi-shrivastava/drm-intel-nightly | drivers/of/of_reserved_mem.c | 721 | 7588 | /*
* Device tree based initialization code for reserved memory.
*
* Copyright (c) 2013, The Linux Foundation. All Rights Reserved.
* Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Author: Marek Szyprowski <m.szyprowski@samsung.com>
* Author: Josh Cartwright <joshc@codeaurora.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version of the license.
*/
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#define MAX_RESERVED_REGIONS 16
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;
#if defined(CONFIG_HAVE_MEMBLOCK)
#include <linux/memblock.h>
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
{
/*
* We use __memblock_alloc_base() because memblock_alloc_base()
* panic()s on allocation failure.
*/
phys_addr_t base = __memblock_alloc_base(size, align, end);
if (!base)
return -ENOMEM;
/*
* Check if the allocated region fits in to start..end window
*/
if (base < start) {
memblock_free(base, size);
return -ENOMEM;
}
*res_base = base;
if (nomap)
return memblock_remove(base, size);
return 0;
}
#else
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
{
pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n",
size, nomap ? " (nomap)" : "");
return -ENOSYS;
}
#endif
/**
* fdt_reserved_mem_save_node() - save fdt node for second pass initialization
*/
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
phys_addr_t base, phys_addr_t size)
{
struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
pr_err("Reserved memory: not enough space all defined regions.\n");
return;
}
rmem->fdt_node = node;
rmem->name = uname;
rmem->base = base;
rmem->size = size;
reserved_mem_count++;
return;
}
/**
* __reserved_mem_alloc_size() - allocate reserved memory described by the
* 'size', 'alignment' and 'alloc-ranges' properties
*/
static int __init __reserved_mem_alloc_size(unsigned long node,
const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
phys_addr_t start = 0, end = 0;
phys_addr_t base = 0, align = 0, size;
int len;
const __be32 *prop;
int nomap;
int ret;
prop = of_get_flat_dt_prop(node, "size", &len);
if (!prop)
return -EINVAL;
if (len != dt_root_size_cells * sizeof(__be32)) {
pr_err("Reserved memory: invalid size property in '%s' node.\n",
uname);
return -EINVAL;
}
size = dt_mem_next_cell(dt_root_size_cells, &prop);
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
prop = of_get_flat_dt_prop(node, "alignment", &len);
if (prop) {
if (len != dt_root_addr_cells * sizeof(__be32)) {
pr_err("Reserved memory: invalid alignment property in '%s' node.\n",
uname);
return -EINVAL;
}
align = dt_mem_next_cell(dt_root_addr_cells, &prop);
}
prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
if (prop) {
if (len % t_len != 0) {
pr_err("Reserved memory: invalid alloc-ranges property in '%s', skipping node.\n",
uname);
return -EINVAL;
}
base = 0;
while (len > 0) {
start = dt_mem_next_cell(dt_root_addr_cells, &prop);
end = start + dt_mem_next_cell(dt_root_size_cells,
&prop);
ret = early_init_dt_alloc_reserved_memory_arch(size,
align, start, end, nomap, &base);
if (ret == 0) {
pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
uname, &base,
(unsigned long)size / SZ_1M);
break;
}
len -= t_len;
}
} else {
ret = early_init_dt_alloc_reserved_memory_arch(size, align,
0, 0, nomap, &base);
if (ret == 0)
pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
uname, &base, (unsigned long)size / SZ_1M);
}
if (base == 0) {
pr_info("Reserved memory: failed to allocate memory for node '%s'\n",
uname);
return -ENOMEM;
}
*res_base = base;
*res_size = size;
return 0;
}
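/*
* Illustrative reserved-memory child node (labels and values are hypothetical)
* showing the properties parsed by __reserved_mem_alloc_size() above:
*
*	multimedia_reserved: multimedia {
*		compatible = "shared-dma-pool";
*		size = <0x4000000>;
*		alignment = <0x100000>;
*		alloc-ranges = <0x40000000 0x10000000>;
*	};
*/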
static const struct of_device_id __rmem_of_table_sentinel
__used __section(__reservedmem_of_table_end);
/**
* __reserved_mem_init_node() - call region specific reserved memory init code
*/
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
extern const struct of_device_id __reservedmem_of_table[];
const struct of_device_id *i;
for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
reservedmem_of_init_fn initfn = i->data;
const char *compat = i->compatible;
if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
continue;
if (initfn(rmem) == 0) {
pr_info("Reserved memory: initialized node %s, compatible id %s\n",
rmem->name, compat);
return 0;
}
}
return -ENOENT;
}
/**
* fdt_init_reserved_mem - allocate and init all saved reserved memory regions
*/
void __init fdt_init_reserved_mem(void)
{
int i;
for (i = 0; i < reserved_mem_count; i++) {
struct reserved_mem *rmem = &reserved_mem[i];
unsigned long node = rmem->fdt_node;
int len;
const __be32 *prop;
int err = 0;
prop = of_get_flat_dt_prop(node, "phandle", &len);
if (!prop)
prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
if (prop)
rmem->phandle = of_read_number(prop, len/4);
if (rmem->size == 0)
err = __reserved_mem_alloc_size(node, rmem->name,
&rmem->base, &rmem->size);
if (err == 0)
__reserved_mem_init_node(rmem);
}
}
static inline struct reserved_mem *__find_rmem(struct device_node *node)
{
unsigned int i;
if (!node->phandle)
return NULL;
for (i = 0; i < reserved_mem_count; i++)
if (reserved_mem[i].phandle == node->phandle)
return &reserved_mem[i];
return NULL;
}
/**
* of_reserved_mem_device_init() - assign reserved memory region to given device
*
* This function assigns the memory region pointed to by the "memory-region"
* device tree property to the given device.
*/
int of_reserved_mem_device_init(struct device *dev)
{
struct reserved_mem *rmem;
struct device_node *np;
int ret;
np = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!np)
return -ENODEV;
rmem = __find_rmem(np);
of_node_put(np);
if (!rmem || !rmem->ops || !rmem->ops->device_init)
return -EINVAL;
ret = rmem->ops->device_init(rmem, dev);
if (ret == 0)
dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init);
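/*
* Illustrative consumer node (names hypothetical): the reserved region is
* referenced by phandle, and the device driver calls
* of_reserved_mem_device_init(dev) from its probe():
*
*	codec@12300000 {
*		memory-region = <&multimedia_reserved>;
*	};
*/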
/**
* of_reserved_mem_device_release() - release reserved memory device structures
*
* This function releases structures allocated for memory region handling for
* the given device.
*/
void of_reserved_mem_device_release(struct device *dev)
{
struct reserved_mem *rmem;
struct device_node *np;
np = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!np)
return;
rmem = __find_rmem(np);
of_node_put(np);
if (!rmem || !rmem->ops || !rmem->ops->device_release)
return;
rmem->ops->device_release(rmem, dev);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
| gpl-2.0 |
openedev/streak_4.05_kernel | arch/microblaze/mm/init.c | 721 | 9908 | /*
* Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h> /* mem_init */
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
/* Use for MMU and noMMU because of PCI generic code */
int mem_init_done;
#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);
#else
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
static int init_bootmem_done;
#endif /* CONFIG_MMU */
char *klimit = _end;
/*
* Initialize the bootmem system and give it all the memory we
* have available.
*/
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_end; /* due to mm/nommu.c */
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
/*
* paging_init() sets up the page tables - in fact we've already done this.
*/
static void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES];
/* Clean every zones */
memset(zones_size, 0, sizeof(zones_size));
/*
* old: we could DMA to/from any address, so every page went into ZONE_DMA.
* We use only ZONE_NORMAL
*/
zones_size[ZONE_NORMAL] = max_mapnr;
free_area_init(zones_size);
}
void __init setup_memory(void)
{
int i;
unsigned long map_size;
#ifndef CONFIG_MMU
u32 kernel_align_start, kernel_align_size;
/* Find main memory where is the kernel */
for (i = 0; i < memblock.memory.cnt; i++) {
memory_start = (u32) memblock.memory.region[i].base;
memory_end = (u32) memblock.memory.region[i].base
+ (u32) memblock.memory.region[i].size;
if ((memory_start <= (u32)_text) &&
((u32)_text <= memory_end)) {
memory_size = memory_end - memory_start;
PAGE_OFFSET = memory_start;
printk(KERN_INFO "%s: Main mem: 0x%x-0x%x, "
"size 0x%08x\n", __func__, (u32) memory_start,
(u32) memory_end, (u32) memory_size);
break;
}
}
if (!memory_start || !memory_end) {
panic("%s: Missing memory setting 0x%08x-0x%08x\n",
__func__, (u32) memory_start, (u32) memory_end);
}
/* reservation of region where is the kernel */
kernel_align_start = PAGE_DOWN((u32)_text);
/* ALIGN can be removed because _end in vmlinux.lds.S is aligned */
kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
memblock_reserve(kernel_align_start, kernel_align_size);
printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n",
__func__, kernel_align_start, kernel_align_start
+ kernel_align_size, kernel_align_size);
#endif
/*
* Kernel:
* start: base phys address of kernel - page align
* end: end phys address of kernel - page align
*
* min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
* max_low_pfn
* max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
* num_physpages - number of all pages
*/
/* memory start is from the kernel end (aligned) to higher addr */
min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
/* RAM is assumed contiguous */
num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
max_pfn = max_low_pfn = memory_end >> PAGE_SHIFT;
printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr);
printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
/*
* Find an area to use for the bootmem bitmap.
* We look for the first area which is at least
* 128kB in length (128kB is enough for a bitmap
* for 4GB of memory, using 4kB pages), plus 1 page
* (in case the address isn't page-aligned).
*/
#ifndef CONFIG_MMU
map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)klimit)),
min_low_pfn, max_low_pfn);
#else
map_size = init_bootmem_node(&contig_page_data,
PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
#endif
memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
/* free bootmem is whole main memory */
free_bootmem(memory_start, memory_size);
/* reserve allocate blocks */
for (i = 0; i < memblock.reserved.cnt; i++) {
pr_debug("reserved %d - 0x%08x-0x%08x\n", i,
(u32) memblock.reserved.region[i].base,
(u32) memblock_size_bytes(&memblock.reserved, i));
reserve_bootmem(memblock.reserved.region[i].base,
memblock_size_bytes(&memblock.reserved, i) - 1, BOOTMEM_DEFAULT);
}
#ifdef CONFIG_MMU
init_bootmem_done = 1;
#endif
paging_init();
}
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
unsigned long addr;
for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
free_page(addr);
totalram_pages++;
}
printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
int pages = 0;
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
pages++;
}
printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n",
(int)(pages * (PAGE_SIZE / 1024)));
}
#endif
void free_initmem(void)
{
free_init_pages("unused kernel memory",
(unsigned long)(&__init_begin),
(unsigned long)(&__init_end));
}
void __init mem_init(void)
{
high_memory = (void *)__va(memory_end);
/* this will put all memory onto the freelists */
totalram_pages += free_all_bootmem();
printk(KERN_INFO "Memory: %luk/%luk available\n",
nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10));
mem_init_done = 1;
}
#ifndef CONFIG_MMU
int page_is_ram(unsigned long pfn)
{
return __range_ok(pfn, 0);
}
#else
int page_is_ram(unsigned long pfn)
{
return pfn < max_low_pfn;
}
/*
* Check for command-line options that affect what MMU_init will do.
*/
static void mm_cmdline_setup(void)
{
unsigned long maxmem = 0;
char *p = cmd_line;
/* Look for mem= option on command line */
p = strstr(cmd_line, "mem=");
if (p) {
p += 4;
maxmem = memparse(p, &p);
if (maxmem && memory_size > maxmem) {
memory_size = maxmem;
memory_end = memory_start + memory_size;
memblock.memory.region[0].size = memory_size;
}
}
}
/*
* MMU_init_hw does the chip-specific initialization of the MMU hardware.
*/
static void __init mmu_init_hw(void)
{
/*
* The Zone Protection Register (ZPR) defines how protection will
* be applied to every page which is a member of a given zone. At
* present, we utilize only two of the zones.
* The zone index bits (of ZSEL) in the PTE are used for software
* indicators, except the LSB. For user access, zone 1 is used,
* for kernel access, zone 0 is used. We set all but zone 1
* to zero, allowing only kernel access as indicated in the PTE.
* For zone 1, we set a 01 binary (a value of 10 will not work)
* to allow user access as indicated in the PTE. This also allows
* kernel access as indicated in the PTE.
*/
__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
"mts rzpr, r11;"
: : : "r11");
}
/*
* MMU_init sets up the basic memory mappings for the kernel,
* including both RAM and possibly some I/O regions,
* and sets up the page tables and the MMU hardware ready to go.
*/
/* called from head.S */
asmlinkage void __init mmu_init(void)
{
unsigned int kstart, ksize;
if (!memblock.reserved.cnt) {
printk(KERN_EMERG "Error memory count\n");
machine_restart(NULL);
}
if ((u32) memblock.memory.region[0].size < 0x1000000) {
printk(KERN_EMERG "Memory must be greater than 16MB\n");
machine_restart(NULL);
}
/* Find main memory where the kernel is */
memory_start = (u32) memblock.memory.region[0].base;
memory_end = (u32) memblock.memory.region[0].base +
(u32) memblock.memory.region[0].size;
memory_size = memory_end - memory_start;
mm_cmdline_setup(); /* FIXME parse args from command line - not used */
/*
* Map out the kernel text/data/bss from the available physical
* memory.
*/
kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
/* kernel size */
ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
memblock_reserve(kstart, ksize);
#if defined(CONFIG_BLK_DEV_INITRD)
/* Remove the init RAM disk from the available memory. */
/* if (initrd_start) {
mem_pieces_remove(&phys_avail, __pa(initrd_start),
initrd_end - initrd_start, 1);
}*/
#endif /* CONFIG_BLK_DEV_INITRD */
/* Initialize the MMU hardware */
mmu_init_hw();
/* Map in all of RAM starting at CONFIG_KERNEL_START */
mapin_ram();
#ifdef HIGHMEM_START_BOOL
ioremap_base = HIGHMEM_START;
#else
ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
#endif /* CONFIG_HIGHMEM */
ioremap_bot = ioremap_base;
/* Initialize the context management stuff */
mmu_context_init();
}
/* This is only called until mem_init is done. */
void __init *early_get_page(void)
{
void *p;
if (init_bootmem_done) {
p = alloc_bootmem_pages(PAGE_SIZE);
} else {
/*
* Mem start + 32MB -> here is limit
* because of mem mapping from head.S
*/
p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
memory_start + 0x2000000));
}
return p;
}
#endif /* CONFIG_MMU */
void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
{
if (mem_init_done)
return kmalloc(size, mask);
else
return alloc_bootmem(size);
}
void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
void *p;
if (mem_init_done)
p = kzalloc(size, mask);
else {
p = alloc_bootmem(size);
if (p)
memset(p, 0, size);
}
return p;
}
| gpl-2.0 |
gnehzuil/ali_kernel | arch/x86/mm/kmemcheck/opcode.c | 1489 | 1946 | #include <linux/types.h>
#include "opcode.h"
static bool opcode_is_prefix(uint8_t b)
{
return
/* Group 1 */
b == 0xf0 || b == 0xf2 || b == 0xf3
/* Group 2 */
|| b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26
|| b == 0x64 || b == 0x65 || b == 0x2e || b == 0x3e
/* Group 3 */
|| b == 0x66
/* Group 4 */
|| b == 0x67;
}
#ifdef CONFIG_X86_64
static bool opcode_is_rex_prefix(uint8_t b)
{
return (b & 0xf0) == 0x40;
}
#else
static bool opcode_is_rex_prefix(uint8_t b)
{
return false;
}
#endif
#define REX_W (1 << 3)
/*
* This is a VERY crude opcode decoder. We only need to find the size of the
* load/store that caused our #PF and this should work for all the opcodes
* that we care about. Moreover, the ones who invented this instruction set
* should be shot.
*/
void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size)
{
/* Default operand size */
int operand_size_override = 4;
/* prefixes */
for (; opcode_is_prefix(*op); ++op) {
if (*op == 0x66)
operand_size_override = 2;
}
/* REX prefix */
if (opcode_is_rex_prefix(*op)) {
uint8_t rex = *op;
++op;
if (rex & REX_W) {
switch (*op) {
case 0x63:
*size = 4;
return;
case 0x0f:
++op;
switch (*op) {
case 0xb6:
case 0xbe:
*size = 1;
return;
case 0xb7:
case 0xbf:
*size = 2;
return;
}
break;
}
*size = 8;
return;
}
}
/* escape opcode */
if (*op == 0x0f) {
++op;
/*
* This is move with zero-extend and sign-extend, respectively;
* we don't have to think about 0xb6/0xbe, because this is
* already handled in the conditional below.
*/
if (*op == 0xb7 || *op == 0xbf)
operand_size_override = 2;
}
*size = (*op & 1) ? operand_size_override : 1;
}
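/*
* Worked examples (illustrative): opcode 0x88 (mov r/m8, r8) has the low
* opcode bit clear, so the access size is 1 byte; 0x66 0x89 picks up the
* operand-size override and decodes to 2 bytes; a REX.W prefix followed by
* 0x89 yields 8 bytes.
*/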
const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op)
{
/* skip prefixes */
while (opcode_is_prefix(*op))
++op;
if (opcode_is_rex_prefix(*op))
++op;
return op;
}
| gpl-2.0 |
NookieDevs/android_kernel_bn_encore | arch/x86/kernel/cpu/amd.c | 1489 | 18702 | #include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/pci-direct.h>
#ifdef CONFIG_X86_64
# include <asm/numa_64.h>
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif
#include "cpu.h"
#ifdef CONFIG_X86_32
/*
* B step AMD K6 before B 9730xxxx have hardware bugs that can cause
* misexecution of code under Linux. Owners of such processors should
* contact AMD for precise details and a CPU swap.
*
* See http://www.multimania.com/poulot/k6bug.html
* http://www.amd.com/K6/k6docs/revgd.html
*
* The following test is erm.. interesting. AMD neglected to up
* the chip setting when fixing the bug but they also tweaked some
* performance at the same time..
*/
extern void vide(void);
__asm__(".align 4\nvide: ret");
static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
* General Systems BIOSen alias the cpu frequency registers
* of the Elan at 0x000df000. Unfortunately, one of the Linux
* drivers subsequently pokes it, and changes the CPU speed.
* Workaround : Remove the unneeded alias.
*/
#define CBAR (0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB (0x80000000)
#define CBAR_KEY (0X000000CB)
if (c->x86_model == 9 || c->x86_model == 10) {
if (inl(CBAR) & CBAR_ENB)
outl(0 | CBAR_KEY, CBAR);
}
}
static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
u32 l, h;
int mbytes = num_physpages >> (20-PAGE_SHIFT);
if (c->x86_model < 6) {
/* Based on AMD doc 20734R - June 2000 */
if (c->x86_model == 0) {
clear_cpu_cap(c, X86_FEATURE_APIC);
set_cpu_cap(c, X86_FEATURE_PGE);
}
return;
}
if (c->x86_model == 6 && c->x86_mask == 1) {
const int K6_BUG_LOOP = 1000000;
int n;
void (*f_vide)(void);
unsigned long d, d2;
printk(KERN_INFO "AMD K6 stepping B detected - ");
/*
* It looks like AMD fixed the 2.6.2 bug and improved indirect
* calls at the same time.
*/
n = K6_BUG_LOOP;
f_vide = vide;
rdtscl(d);
while (n--)
f_vide();
rdtscl(d2);
d = d2-d;
if (d > 20*K6_BUG_LOOP)
printk(KERN_CONT
"system stability may be impaired when more than 32 MB are used.\n");
else
printk(KERN_CONT "probably OK (after B9730xxxx).\n");
printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
}
/* K6 with old style WHCR */
if (c->x86_model < 8 ||
(c->x86_model == 8 && c->x86_mask < 8)) {
/* We can only write allocate on the low 508Mb */
if (mbytes > 508)
mbytes = 508;
rdmsr(MSR_K6_WHCR, l, h);
if ((l&0x0000FFFF) == 0) {
unsigned long flags;
l = (1<<0)|((mbytes/4)<<1);
local_irq_save(flags);
wbinvd();
wrmsr(MSR_K6_WHCR, l, h);
local_irq_restore(flags);
printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
mbytes);
}
return;
}
if ((c->x86_model == 8 && c->x86_mask > 7) ||
c->x86_model == 9 || c->x86_model == 13) {
/* The more serious chips .. */
if (mbytes > 4092)
mbytes = 4092;
rdmsr(MSR_K6_WHCR, l, h);
if ((l&0xFFFF0000) == 0) {
unsigned long flags;
l = ((mbytes>>2)<<22)|(1<<16);
local_irq_save(flags);
wbinvd();
wrmsr(MSR_K6_WHCR, l, h);
local_irq_restore(flags);
printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
mbytes);
}
return;
}
if (c->x86_model == 10) {
/* AMD Geode LX is model 10 */
/* placeholder for any needed mods */
return;
}
}
static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
/* calling is from identify_secondary_cpu() ? */
if (!c->cpu_index)
return;
/*
* Certain Athlons might work (for various values of 'work') in SMP
* but they are not certified as MP capable.
*/
/* Athlon 660/661 is valid. */
if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
(c->x86_mask == 1)))
goto valid_k7;
/* Duron 670 is valid */
if ((c->x86_model == 7) && (c->x86_mask == 0))
goto valid_k7;
/*
* Athlon 662, Duron 671, and Athlon >model 7 have capability
* bit. It's worth noting that the A5 stepping (662) of some
* Athlon XP's have the MP bit set.
* See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
* more.
*/
if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
((c->x86_model == 7) && (c->x86_mask >= 1)) ||
(c->x86_model > 7))
if (cpu_has_mp)
goto valid_k7;
/* If we get here, not a certified SMP capable AMD system. */
/*
* Don't taint if we are running SMP kernel on a single non-MP
* approved Athlon
*/
WARN_ONCE(1, "WARNING: This combination of AMD"
" processors is not suitable for SMP.\n");
if (!test_taint(TAINT_UNSAFE_SMP))
add_taint(TAINT_UNSAFE_SMP);
valid_k7:
;
#endif
}
static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
u32 l, h;
/*
* Bit 15 of Athlon specific MSR 15, needs to be 0
* to enable SSE on Palomino/Morgan/Barton CPU's.
* If the BIOS didn't enable it already, enable it here.
*/
if (c->x86_model >= 6 && c->x86_model <= 10) {
if (!cpu_has(c, X86_FEATURE_XMM)) {
printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
rdmsr(MSR_K7_HWCR, l, h);
l &= ~0x00008000;
wrmsr(MSR_K7_HWCR, l, h);
set_cpu_cap(c, X86_FEATURE_XMM);
}
}
/*
* It's been determined by AMD that Athlons since model 8 stepping 1
* are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
* As per AMD technical note 27212 0.2
*/
if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
rdmsr(MSR_K7_CLK_CTL, l, h);
if ((l & 0xfff00000) != 0x20000000) {
printk(KERN_INFO
"CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
l, ((l & 0x000fffff)|0x20000000));
wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
}
}
set_cpu_cap(c, X86_FEATURE_K7);
amd_k7_smp_check(c);
}
#endif
#ifdef CONFIG_NUMA
/*
* To workaround broken NUMA config. Read the comment in
* srat_detect_node().
*/
static int __cpuinit nearby_node(int apicid)
{
int i, node;
for (i = apicid - 1; i >= 0; i--) {
node = __apicid_to_node[i];
if (node != NUMA_NO_NODE && node_online(node))
return node;
}
for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
node = __apicid_to_node[i];
if (node != NUMA_NO_NODE && node_online(node))
return node;
}
return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
* Fixup core topology information for
* (1) AMD multi-node processors
* Assumption: Number of cores in each internal node is the same.
* (2) AMD processors supporting compute units
*/
#ifdef CONFIG_X86_HT
static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
{
u32 nodes, cores_per_cu = 1;
u8 node_id;
int cpu = smp_processor_id();
/* get information required for multi-node processors */
if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
u32 eax, ebx, ecx, edx;
cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
nodes = ((ecx >> 8) & 7) + 1;
node_id = ecx & 7;
/* get compute unit information */
smp_num_siblings = ((ebx >> 8) & 3) + 1;
c->compute_unit_id = ebx & 0xff;
cores_per_cu += ((ebx >> 8) & 3);
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
u64 value;
rdmsrl(MSR_FAM10H_NODE_ID, value);
nodes = ((value >> 3) & 7) + 1;
node_id = value & 7;
} else
return;
/* fixup multi-node processor information */
if (nodes > 1) {
u32 cores_per_node;
u32 cus_per_node;
set_cpu_cap(c, X86_FEATURE_AMD_DCM);
cores_per_node = c->x86_max_cores / nodes;
cus_per_node = cores_per_node / cores_per_cu;
/* store NodeID, use llc_shared_map to store sibling info */
per_cpu(cpu_llc_id, cpu) = node_id;
/* core id has to be in the [0 .. cores_per_node - 1] range */
c->cpu_core_id %= cores_per_node;
c->compute_unit_id %= cus_per_node;
}
}
#endif
/*
* On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
* Assumes number of cores is a power of two.
*/
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
unsigned bits;
int cpu = smp_processor_id();
bits = c->x86_coreid_bits;
/* Low order bits define the core id (index of core in socket) */
c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
/* Convert the initial APIC ID into the socket ID */
c->phys_proc_id = c->initial_apicid >> bits;
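/*
* Example (illustrative): with x86_coreid_bits == 1 (a dual core part), an
* initial APIC ID of 3 gives cpu_core_id 1 on phys_proc_id 1.
*/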
/* use socket ID also for last level cache */
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
amd_get_topology(c);
#endif
}
int amd_get_nb_id(int cpu)
{
int id = 0;
#ifdef CONFIG_SMP
id = per_cpu(cpu_llc_id, cpu);
#endif
return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
int cpu = smp_processor_id();
int node;
unsigned apicid = c->apicid;
node = numa_cpu_node(cpu);
if (node == NUMA_NO_NODE)
node = per_cpu(cpu_llc_id, cpu);
if (!node_online(node)) {
/*
* Two possibilities here:
*
* - The CPU is missing memory and no node was created. In
* that case try picking one from a nearby CPU.
*
* - The APIC IDs differ from the HyperTransport node IDs
* which the K8 northbridge parsing fills in. Assume
* they are all increased by a constant offset, but in
* the same order as the HT nodeids. If that doesn't
* result in a usable node fall back to the path for the
* previous case.
*
* This workaround operates directly on the mapping between
* APIC ID and NUMA node, assuming certain relationship
* between APIC ID, HT node ID and NUMA topology. As going
* through CPU mapping may alter the outcome, directly
* access __apicid_to_node[].
*/
int ht_nodeid = c->initial_apicid;
if (ht_nodeid >= 0 &&
__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
node = __apicid_to_node[ht_nodeid];
/* Pick a nearby node */
if (!node_online(node))
node = nearby_node(apicid);
}
numa_set_node(cpu, node);
#endif
}
static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
unsigned bits, ecx;
/* Multi core CPU? */
if (c->extended_cpuid_level < 0x80000008)
return;
ecx = cpuid_ecx(0x80000008);
c->x86_max_cores = (ecx & 0xff) + 1;
/* CPU telling us the core id bits shift? */
bits = (ecx >> 12) & 0xF;
/* Otherwise recompute */
if (bits == 0) {
while ((1 << bits) < c->x86_max_cores)
bits++;
}
c->x86_coreid_bits = bits;
#endif
}
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
early_init_amd_mc(c);
/*
* c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
* with P/T states and does not stop in deep C-states
*/
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
}
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
/* Set MTRR capability flag if appropriate */
if (c->x86 == 5)
if (c->x86_model == 13 || c->x86_model == 9 ||
(c->x86_model == 8 && c->x86_mask >= 8))
set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
/* check CPU config space for extended APIC ID */
if (cpu_has_apic && c->x86 >= 0xf) {
unsigned int val;
val = read_pci_config(0, 24, 0, 0x68);
if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
}
#endif
/* We need to do the following only once */
if (c != &boot_cpu_data)
return;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
if (c->x86 > 0x10 ||
(c->x86 == 0x10 && c->x86_model >= 0x2)) {
u64 val;
rdmsrl(MSR_K7_HWCR, val);
if (!(val & BIT(24)))
printk(KERN_WARNING FW_BUG "TSC doesn't count "
"with P0 frequency!\n");
}
}
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
unsigned long long value;
/*
* Disable TLB flush filter by setting HWCR.FFDIS on K8
* bit 6 of msr C001_0015
*
* Errata 63 for SH-B3 steppings
* Errata 122 for all steppings (F+ have it disabled by default)
*/
if (c->x86 == 0xf) {
rdmsrl(MSR_K7_HWCR, value);
value |= 1 << 6;
wrmsrl(MSR_K7_HWCR, value);
}
#endif
early_init_amd(c);
/*
* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
* 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
*/
clear_cpu_cap(c, 0*32+31);
#ifdef CONFIG_X86_64
/* On C+ stepping K8 rep microcode works well for copy/memset */
if (c->x86 == 0xf) {
u32 level;
level = cpuid_eax(1);
if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
/*
* Some BIOSes incorrectly force this feature, but only K8
* revision D (model = 0x14) and later actually support it.
* (AMD Erratum #110, docId: 25759).
*/
if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
u64 val;
clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
if (!rdmsrl_amd_safe(0xc001100d, &val)) {
val &= ~(1ULL << 32);
wrmsrl_amd_safe(0xc001100d, val);
}
}
}
if (c->x86 >= 0x10)
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
/* get apicid instead of initial apic id from cpuid */
c->apicid = hard_smp_processor_id();
#else
/*
* FIXME: We should handle the K5 here. Set up the write
* range and also turn on MSR 83 bits 4 and 31 (write alloc,
* no bus pipeline)
*/
switch (c->x86) {
case 4:
init_amd_k5(c);
break;
case 5:
init_amd_k6(c);
break;
case 6: /* An Athlon/Duron */
init_amd_k7(c);
break;
}
/* K6s reports MCEs but don't actually have all the MSRs */
if (c->x86 < 6)
clear_cpu_cap(c, X86_FEATURE_MCE);
#endif
/* Enable workaround for FXSAVE leak */
if (c->x86 >= 6)
set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
if (!c->x86_model_id[0]) {
switch (c->x86) {
case 0xf:
/* Should distinguish Models here, but this is only
a fallback anyways. */
strcpy(c->x86_model_id, "Hammer");
break;
}
}
cpu_detect_cache_sizes(c);
/* Multi core CPU? */
if (c->extended_cpuid_level >= 0x80000008) {
amd_detect_cmp(c);
srat_detect_node(c);
}
#ifdef CONFIG_X86_32
detect_ht(c);
#endif
if (c->extended_cpuid_level >= 0x80000006) {
if (cpuid_edx(0x80000006) & 0xf000)
num_cache_leaves = 4;
else
num_cache_leaves = 3;
}
if (c->x86 >= 0xf)
set_cpu_cap(c, X86_FEATURE_K8);
if (cpu_has_xmm2) {
/* MFENCE stops RDTSC speculation */
set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
}
#ifdef CONFIG_X86_64
if (c->x86 == 0x10) {
/* do this for boot cpu */
if (c == &boot_cpu_data)
check_enable_amd_mmconf_dmi();
fam10h_check_enable_mmcfg();
}
if (c == &boot_cpu_data && c->x86 >= 0xf) {
unsigned long long tseg;
/*
* Split up direct mapping around the TSEG SMM area.
* Don't do it for gbpages because there seems very little
* benefit in doing so.
*/
if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
printk(KERN_DEBUG "tseg: %010llx\n", tseg);
if ((tseg>>PMD_SHIFT) <
(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
((tseg>>PMD_SHIFT) <
(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
set_memory_4k((unsigned long)__va(tseg), 1);
}
}
#endif
/*
* Family 0x12 and above processors have APIC timer
* running in deep C states.
*/
if (c->x86 > 0x11)
set_cpu_cap(c, X86_FEATURE_ARAT);
/*
* Disable GART TLB Walk Errors on Fam10h. We do this here
* because this is always needed when GART is enabled, even in a
* kernel which has no MCE support built in.
*/
if (c->x86 == 0x10) {
/*
* The BIOS should disable GartTlbWlk errors itself. If
* it doesn't, do it here as suggested by the BKDG.
*
* Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
*/
u64 mask;
int err;
err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
if (err == 0) {
mask |= (1 << 10);
checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
}
}
}
#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
unsigned int size)
{
/* AMD errata T13 (order #21922) */
if ((c->x86 == 6)) {
/* Duron Rev A0 */
if (c->x86_model == 3 && c->x86_mask == 0)
size = 64;
/* Tbird rev A1/A2 */
if (c->x86_model == 4 &&
(c->x86_mask == 0 || c->x86_mask == 1))
size = 256;
}
return size;
}
#endif
static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
.c_vendor = "AMD",
.c_ident = { "AuthenticAMD" },
#ifdef CONFIG_X86_32
.c_models = {
{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
{
[3] = "486 DX/2",
[7] = "486 DX/2-WB",
[8] = "486 DX/4",
[9] = "486 DX/4-WB",
[14] = "Am5x86-WT",
[15] = "Am5x86-WB"
}
},
},
.c_size_cache = amd_size_cache,
#endif
.c_early_init = early_init_amd,
.c_init = init_amd,
.c_x86_vendor = X86_VENDOR_AMD,
};
cpu_dev_register(amd_cpu_dev);
/*
* AMD errata checking
*
* Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
* AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
* have an OSVW id assigned, which it takes as first argument. Both take a
* variable number of family-specific model-stepping ranges created by
* AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
* int[] in arch/x86/include/asm/processor.h.
*
* Example:
*
* const int amd_erratum_319[] =
* AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
* AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
* AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
*/
const int amd_erratum_400[] =
AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_400);
const int amd_erratum_383[] =
AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_383);
bool cpu_has_amd_erratum(const int *erratum)
{
struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
int osvw_id = *erratum++;
u32 range;
u32 ms;
/*
* If called early enough that current_cpu_data hasn't been initialized
* yet, fall back to boot_cpu_data.
*/
if (cpu->x86 == 0)
cpu = &boot_cpu_data;
if (cpu->x86_vendor != X86_VENDOR_AMD)
return false;
if (osvw_id >= 0 && osvw_id < 65536 &&
cpu_has(cpu, X86_FEATURE_OSVW)) {
u64 osvw_len;
rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
if (osvw_id < osvw_len) {
u64 osvw_bits;
rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
osvw_bits);
return osvw_bits & (1ULL << (osvw_id & 0x3f));
}
}
/* OSVW unavailable or ID unknown, match family-model-stepping range */
ms = (cpu->x86_model << 4) | cpu->x86_mask;
while ((range = *erratum++))
if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
(ms >= AMD_MODEL_RANGE_START(range)) &&
(ms <= AMD_MODEL_RANGE_END(range)))
return true;
return false;
}
EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
| gpl-2.0 |
Jazz-823/kernel_lge_hammerhead_CM | drivers/usb/gadget/u_data_hsic.c | 2001 | 29331 | /* Copyright (c) 2011-2013, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/termios.h>
#include <mach/usb_bridge.h>
#include <mach/usb_gadget_xport.h>
static unsigned int no_data_ports;
#define GHSIC_DATA_RMNET_RX_Q_SIZE 50
#define GHSIC_DATA_RMNET_TX_Q_SIZE 300
#define GHSIC_DATA_SERIAL_RX_Q_SIZE 10
#define GHSIC_DATA_SERIAL_TX_Q_SIZE 20
#define GHSIC_DATA_RX_REQ_SIZE 2048
#define GHSIC_DATA_TX_INTR_THRESHOLD 20
static unsigned int ghsic_data_rmnet_tx_q_size = GHSIC_DATA_RMNET_TX_Q_SIZE;
module_param(ghsic_data_rmnet_tx_q_size, uint, S_IRUGO | S_IWUSR);
static unsigned int ghsic_data_rmnet_rx_q_size = GHSIC_DATA_RMNET_RX_Q_SIZE;
module_param(ghsic_data_rmnet_rx_q_size, uint, S_IRUGO | S_IWUSR);
static unsigned int ghsic_data_serial_tx_q_size = GHSIC_DATA_SERIAL_TX_Q_SIZE;
module_param(ghsic_data_serial_tx_q_size, uint, S_IRUGO | S_IWUSR);
static unsigned int ghsic_data_serial_rx_q_size = GHSIC_DATA_SERIAL_RX_Q_SIZE;
module_param(ghsic_data_serial_rx_q_size, uint, S_IRUGO | S_IWUSR);
static unsigned int ghsic_data_rx_req_size = GHSIC_DATA_RX_REQ_SIZE;
module_param(ghsic_data_rx_req_size, uint, S_IRUGO | S_IWUSR);
unsigned int ghsic_data_tx_intr_thld = GHSIC_DATA_TX_INTR_THRESHOLD;
module_param(ghsic_data_tx_intr_thld, uint, S_IRUGO | S_IWUSR);
/*flow ctrl*/
#define GHSIC_DATA_FLOW_CTRL_EN_THRESHOLD 500
#define GHSIC_DATA_FLOW_CTRL_DISABLE 300
#define GHSIC_DATA_FLOW_CTRL_SUPPORT 1
#define GHSIC_DATA_PENDLIMIT_WITH_BRIDGE 500
static unsigned int ghsic_data_fctrl_support = GHSIC_DATA_FLOW_CTRL_SUPPORT;
module_param(ghsic_data_fctrl_support, uint, S_IRUGO | S_IWUSR);
static unsigned int ghsic_data_fctrl_en_thld =
GHSIC_DATA_FLOW_CTRL_EN_THRESHOLD;
module_param(ghsic_data_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
static unsigned int ghsic_data_fctrl_dis_thld = GHSIC_DATA_FLOW_CTRL_DISABLE;
module_param(ghsic_data_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
static unsigned int ghsic_data_pend_limit_with_bridge =
GHSIC_DATA_PENDLIMIT_WITH_BRIDGE;
module_param(ghsic_data_pend_limit_with_bridge, uint, S_IRUGO | S_IWUSR);
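/*
* The enable/disable thresholds above implement hysteresis on the TX skb
* queue towards the host: RX from the bridge is throttled once tx_skb_q
* reaches ghsic_data_fctrl_en_thld and is unthrottled again only after the
* queue drains back to ghsic_data_fctrl_dis_thld or fewer entries.
*/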
#define CH_OPENED 0
#define CH_READY 1
struct gdata_port {
/* port */
unsigned port_num;
/* gadget */
atomic_t connected;
struct usb_ep *in;
struct usb_ep *out;
enum gadget_type gtype;
/* data transfer queues */
unsigned int tx_q_size;
struct list_head tx_idle;
struct sk_buff_head tx_skb_q;
spinlock_t tx_lock;
unsigned int rx_q_size;
struct list_head rx_idle;
struct sk_buff_head rx_skb_q;
spinlock_t rx_lock;
/* work */
struct workqueue_struct *wq;
struct work_struct connect_w;
struct work_struct disconnect_w;
struct work_struct write_tomdm_w;
struct work_struct write_tohost_w;
struct bridge brdg;
/*bridge status*/
unsigned long bridge_sts;
unsigned int n_tx_req_queued;
/*counters*/
unsigned long to_modem;
unsigned long to_host;
unsigned int rx_throttled_cnt;
unsigned int rx_unthrottled_cnt;
unsigned int tx_throttled_cnt;
unsigned int tx_unthrottled_cnt;
unsigned int tomodem_drp_cnt;
unsigned int unthrottled_pnd_skbs;
};
static struct {
struct gdata_port *port;
struct platform_driver pdrv;
char port_name[BRIDGE_NAME_MAX_LEN];
} gdata_ports[NUM_PORTS];
static unsigned int get_timestamp(void);
static void dbg_timestamp(char *, struct sk_buff *);
static void ghsic_data_start_rx(struct gdata_port *port);
static void ghsic_data_free_requests(struct usb_ep *ep, struct list_head *head)
{
struct usb_request *req;
while (!list_empty(head)) {
req = list_entry(head->next, struct usb_request, list);
list_del(&req->list);
usb_ep_free_request(ep, req);
}
}
static int ghsic_data_alloc_requests(struct usb_ep *ep, struct list_head *head,
int num,
void (*cb)(struct usb_ep *ep, struct usb_request *),
spinlock_t *lock)
{
int i;
struct usb_request *req;
unsigned long flags;
pr_debug("%s: ep:%s head:%p num:%d cb:%p", __func__,
ep->name, head, num, cb);
for (i = 0; i < num; i++) {
req = usb_ep_alloc_request(ep, GFP_KERNEL);
if (!req) {
pr_debug("%s: req allocated:%d\n", __func__, i);
return list_empty(head) ? -ENOMEM : 0;
}
req->complete = cb;
spin_lock_irqsave(lock, flags);
list_add(&req->list, head);
spin_unlock_irqrestore(lock, flags);
}
return 0;
}
static void ghsic_data_unthrottle_tx(void *ctx)
{
struct gdata_port *port = ctx;
unsigned long flags;
if (!port || !atomic_read(&port->connected))
return;
spin_lock_irqsave(&port->rx_lock, flags);
port->tx_unthrottled_cnt++;
spin_unlock_irqrestore(&port->rx_lock, flags);
queue_work(port->wq, &port->write_tomdm_w);
pr_debug("%s: port num =%d unthrottled\n", __func__,
port->port_num);
}
static void ghsic_data_write_tohost(struct work_struct *w)
{
unsigned long flags;
struct sk_buff *skb;
int ret;
struct usb_request *req;
struct usb_ep *ep;
struct gdata_port *port;
struct timestamp_info *info;
port = container_of(w, struct gdata_port, write_tohost_w);
if (!port)
return;
spin_lock_irqsave(&port->tx_lock, flags);
ep = port->in;
if (!ep) {
spin_unlock_irqrestore(&port->tx_lock, flags);
return;
}
while (!list_empty(&port->tx_idle)) {
skb = __skb_dequeue(&port->tx_skb_q);
if (!skb)
break;
req = list_first_entry(&port->tx_idle, struct usb_request,
list);
req->context = skb;
req->buf = skb->data;
req->length = skb->len;
req->zero = 1;
port->n_tx_req_queued++;
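/*
* Interrupt moderation: only every ghsic_data_tx_intr_thld-th IN request
* asks for a completion interrupt; the rest are queued with no_interrupt
* set to reduce interrupt load on the host path.
*/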
if (port->n_tx_req_queued == ghsic_data_tx_intr_thld) {
req->no_interrupt = 0;
port->n_tx_req_queued = 0;
} else {
req->no_interrupt = 1;
}
list_del(&req->list);
info = (struct timestamp_info *)skb->cb;
info->tx_queued = get_timestamp();
spin_unlock_irqrestore(&port->tx_lock, flags);
ret = usb_ep_queue(ep, req, GFP_KERNEL);
spin_lock_irqsave(&port->tx_lock, flags);
if (ret) {
pr_err("%s: usb epIn failed\n", __func__);
list_add(&req->list, &port->tx_idle);
dev_kfree_skb_any(skb);
break;
}
port->to_host++;
if (ghsic_data_fctrl_support &&
port->tx_skb_q.qlen <= ghsic_data_fctrl_dis_thld &&
test_and_clear_bit(RX_THROTTLED, &port->brdg.flags)) {
port->rx_unthrottled_cnt++;
port->unthrottled_pnd_skbs = port->tx_skb_q.qlen;
pr_debug_ratelimited("%s: disable flow ctrl:"
" tx skbq len: %u\n",
__func__, port->tx_skb_q.qlen);
data_bridge_unthrottle_rx(port->brdg.ch_id);
}
}
spin_unlock_irqrestore(&port->tx_lock, flags);
}
static int ghsic_data_receive(void *p, void *data, size_t len)
{
struct gdata_port *port = p;
unsigned long flags;
struct sk_buff *skb = data;
if (!port || !atomic_read(&port->connected)) {
dev_kfree_skb_any(skb);
return -ENOTCONN;
}
pr_debug("%s: p:%p#%d skb_len:%d\n", __func__,
port, port->port_num, skb->len);
spin_lock_irqsave(&port->tx_lock, flags);
__skb_queue_tail(&port->tx_skb_q, skb);
if (ghsic_data_fctrl_support &&
port->tx_skb_q.qlen >= ghsic_data_fctrl_en_thld) {
set_bit(RX_THROTTLED, &port->brdg.flags);
port->rx_throttled_cnt++;
pr_debug_ratelimited("%s: flow ctrl enabled: tx skbq len: %u\n",
__func__, port->tx_skb_q.qlen);
spin_unlock_irqrestore(&port->tx_lock, flags);
queue_work(port->wq, &port->write_tohost_w);
return -EBUSY;
}
spin_unlock_irqrestore(&port->tx_lock, flags);
queue_work(port->wq, &port->write_tohost_w);
return 0;
}
static void ghsic_data_write_tomdm(struct work_struct *w)
{
struct gdata_port *port;
struct sk_buff *skb;
struct timestamp_info *info;
unsigned long flags;
int ret;
port = container_of(w, struct gdata_port, write_tomdm_w);
if (!port || !atomic_read(&port->connected))
return;
spin_lock_irqsave(&port->rx_lock, flags);
if (test_bit(TX_THROTTLED, &port->brdg.flags)) {
spin_unlock_irqrestore(&port->rx_lock, flags);
goto start_rx;
}
while ((skb = __skb_dequeue(&port->rx_skb_q))) {
pr_debug("%s: port:%p tom:%lu pno:%d\n", __func__,
port, port->to_modem, port->port_num);
info = (struct timestamp_info *)skb->cb;
info->rx_done_sent = get_timestamp();
spin_unlock_irqrestore(&port->rx_lock, flags);
ret = data_bridge_write(port->brdg.ch_id, skb);
spin_lock_irqsave(&port->rx_lock, flags);
if (ret < 0) {
if (ret == -EBUSY) {
/*flow control*/
port->tx_throttled_cnt++;
break;
}
pr_err_ratelimited("%s: write error:%d\n",
__func__, ret);
port->tomodem_drp_cnt++;
dev_kfree_skb_any(skb);
break;
}
port->to_modem++;
}
spin_unlock_irqrestore(&port->rx_lock, flags);
start_rx:
ghsic_data_start_rx(port);
}
static void ghsic_data_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
struct gdata_port *port = ep->driver_data;
struct sk_buff *skb = req->context;
int status = req->status;
switch (status) {
case 0:
/* successful completion */
dbg_timestamp("DL", skb);
break;
case -ECONNRESET:
case -ESHUTDOWN:
/* connection gone */
dev_kfree_skb_any(skb);
req->buf = 0;
usb_ep_free_request(ep, req);
return;
default:
pr_err("%s: data tx ep error %d\n", __func__, status);
break;
}
dev_kfree_skb_any(skb);
spin_lock(&port->tx_lock);
list_add_tail(&req->list, &port->tx_idle);
spin_unlock(&port->tx_lock);
queue_work(port->wq, &port->write_tohost_w);
}
static void
ghsic_data_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
struct gdata_port *port = ep->driver_data;
struct sk_buff *skb = req->context;
struct timestamp_info *info = (struct timestamp_info *)skb->cb;
int status = req->status;
int queue = 0;
switch (status) {
case 0:
skb_put(skb, req->actual);
queue = 1;
break;
case -ECONNRESET:
case -ESHUTDOWN:
/* cable disconnection */
dev_kfree_skb_any(skb);
req->buf = NULL;
usb_ep_free_request(ep, req);
return;
default:
pr_err_ratelimited("%s: %s response error %d, %d/%d\n",
__func__, ep->name, status,
req->actual, req->length);
dev_kfree_skb_any(skb);
break;
}
spin_lock(&port->rx_lock);
if (queue) {
info->rx_done = get_timestamp();
__skb_queue_tail(&port->rx_skb_q, skb);
list_add_tail(&req->list, &port->rx_idle);
queue_work(port->wq, &port->write_tomdm_w);
}
spin_unlock(&port->rx_lock);
}
static void ghsic_data_start_rx(struct gdata_port *port)
{
struct usb_request *req;
struct usb_ep *ep;
unsigned long flags;
int ret;
struct sk_buff *skb;
struct timestamp_info *info;
unsigned int created;
pr_debug("%s: port:%p\n", __func__, port);
if (!port)
return;
spin_lock_irqsave(&port->rx_lock, flags);
ep = port->out;
if (!ep) {
spin_unlock_irqrestore(&port->rx_lock, flags);
return;
}
while (atomic_read(&port->connected) && !list_empty(&port->rx_idle)) {
if (port->rx_skb_q.qlen > ghsic_data_pend_limit_with_bridge)
break;
req = list_first_entry(&port->rx_idle,
struct usb_request, list);
list_del(&req->list);
spin_unlock_irqrestore(&port->rx_lock, flags);
created = get_timestamp();
skb = alloc_skb(ghsic_data_rx_req_size, GFP_KERNEL);
if (!skb) {
spin_lock_irqsave(&port->rx_lock, flags);
list_add(&req->list, &port->rx_idle);
break;
}
info = (struct timestamp_info *)skb->cb;
info->created = created;
req->buf = skb->data;
req->length = ghsic_data_rx_req_size;
req->context = skb;
info->rx_queued = get_timestamp();
ret = usb_ep_queue(ep, req, GFP_KERNEL);
spin_lock_irqsave(&port->rx_lock, flags);
if (ret) {
dev_kfree_skb_any(skb);
pr_err_ratelimited("%s: rx queue failed\n", __func__);
if (atomic_read(&port->connected))
list_add(&req->list, &port->rx_idle);
else
usb_ep_free_request(ep, req);
break;
}
}
spin_unlock_irqrestore(&port->rx_lock, flags);
}
static void ghsic_data_start_io(struct gdata_port *port)
{
unsigned long flags;
struct usb_ep *ep_out, *ep_in;
int ret;
pr_debug("%s: port:%p\n", __func__, port);
if (!port)
return;
spin_lock_irqsave(&port->rx_lock, flags);
ep_out = port->out;
spin_unlock_irqrestore(&port->rx_lock, flags);
if (!ep_out)
return;
ret = ghsic_data_alloc_requests(ep_out, &port->rx_idle,
port->rx_q_size, ghsic_data_epout_complete, &port->rx_lock);
if (ret) {
pr_err("%s: rx req allocation failed\n", __func__);
return;
}
spin_lock_irqsave(&port->tx_lock, flags);
ep_in = port->in;
spin_unlock_irqrestore(&port->tx_lock, flags);
if (!ep_in) {
spin_lock_irqsave(&port->rx_lock, flags);
ghsic_data_free_requests(ep_out, &port->rx_idle);
spin_unlock_irqrestore(&port->rx_lock, flags);
return;
}
ret = ghsic_data_alloc_requests(ep_in, &port->tx_idle,
port->tx_q_size, ghsic_data_epin_complete, &port->tx_lock);
if (ret) {
pr_err("%s: tx req allocation failed\n", __func__);
spin_lock_irqsave(&port->rx_lock, flags);
ghsic_data_free_requests(ep_out, &port->rx_idle);
spin_unlock_irqrestore(&port->rx_lock, flags);
return;
}
/* queue out requests */
ghsic_data_start_rx(port);
}
static void ghsic_data_connect_w(struct work_struct *w)
{
struct gdata_port *port =
container_of(w, struct gdata_port, connect_w);
int ret;
if (!port || !atomic_read(&port->connected) ||
!test_bit(CH_READY, &port->bridge_sts))
return;
pr_debug("%s: port:%p\n", __func__, port);
ret = data_bridge_open(&port->brdg);
if (ret) {
pr_err("%s: unable open bridge ch:%d err:%d\n",
__func__, port->brdg.ch_id, ret);
return;
}
set_bit(CH_OPENED, &port->bridge_sts);
ghsic_data_start_io(port);
}
static void ghsic_data_disconnect_w(struct work_struct *w)
{
struct gdata_port *port =
container_of(w, struct gdata_port, disconnect_w);
if (!test_bit(CH_OPENED, &port->bridge_sts))
return;
data_bridge_close(port->brdg.ch_id);
clear_bit(CH_OPENED, &port->bridge_sts);
}
static void ghsic_data_free_buffers(struct gdata_port *port)
{
struct sk_buff *skb;
unsigned long flags;
if (!port)
return;
spin_lock_irqsave(&port->tx_lock, flags);
if (!port->in) {
spin_unlock_irqrestore(&port->tx_lock, flags);
return;
}
ghsic_data_free_requests(port->in, &port->tx_idle);
while ((skb = __skb_dequeue(&port->tx_skb_q)))
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&port->tx_lock, flags);
spin_lock_irqsave(&port->rx_lock, flags);
if (!port->out) {
spin_unlock_irqrestore(&port->rx_lock, flags);
return;
}
ghsic_data_free_requests(port->out, &port->rx_idle);
while ((skb = __skb_dequeue(&port->rx_skb_q)))
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&port->rx_lock, flags);
}
static int ghsic_data_get_port_id(const char *pdev_name)
{
struct gdata_port *port;
int i;
for (i = 0; i < no_data_ports; i++) {
port = gdata_ports[i].port;
if (!strncmp(port->brdg.name, pdev_name, BRIDGE_NAME_MAX_LEN))
return i;
}
return -EINVAL;
}
static int ghsic_data_probe(struct platform_device *pdev)
{
struct gdata_port *port;
int id;
pr_debug("%s: name:%s no_data_ports= %d\n", __func__, pdev->name,
no_data_ports);
id = ghsic_data_get_port_id(pdev->name);
if (id < 0 || id >= no_data_ports) {
pr_err("%s: invalid port: %d\n", __func__, id);
return -EINVAL;
}
port = gdata_ports[id].port;
set_bit(CH_READY, &port->bridge_sts);
/* if usb is online, try opening bridge */
if (atomic_read(&port->connected))
queue_work(port->wq, &port->connect_w);
return 0;
}
/* mdm disconnect */
static int ghsic_data_remove(struct platform_device *pdev)
{
struct gdata_port *port;
struct usb_ep *ep_in;
struct usb_ep *ep_out;
int id;
pr_debug("%s: name:%s\n", __func__, pdev->name);
id = ghsic_data_get_port_id(pdev->name);
if (id < 0 || id >= no_data_ports) {
pr_err("%s: invalid port: %d\n", __func__, id);
return -EINVAL;
}
port = gdata_ports[id].port;
ep_in = port->in;
if (ep_in)
usb_ep_fifo_flush(ep_in);
ep_out = port->out;
if (ep_out)
usb_ep_fifo_flush(ep_out);
/* cancel pending writes to MDM */
cancel_work_sync(&port->write_tomdm_w);
ghsic_data_free_buffers(port);
cancel_work_sync(&port->connect_w);
if (test_and_clear_bit(CH_OPENED, &port->bridge_sts))
data_bridge_close(port->brdg.ch_id);
clear_bit(CH_READY, &port->bridge_sts);
return 0;
}
static void ghsic_data_port_free(int portno)
{
struct gdata_port *port = gdata_ports[portno].port;
struct platform_driver *pdrv = &gdata_ports[portno].pdrv;
destroy_workqueue(port->wq);
kfree(port);
if (pdrv)
platform_driver_unregister(pdrv);
}
static int ghsic_data_port_alloc(unsigned port_num, enum gadget_type gtype)
{
struct gdata_port *port;
struct platform_driver *pdrv;
char *name;
port = kzalloc(sizeof(struct gdata_port), GFP_KERNEL);
if (!port)
return -ENOMEM;
name = gdata_ports[port_num].port_name;
port->wq = create_singlethread_workqueue(name);
if (!port->wq) {
pr_err("%s: Unable to create workqueue:%s\n", __func__, name);
kfree(port);
return -ENOMEM;
}
port->port_num = port_num;
/* port initialization */
spin_lock_init(&port->rx_lock);
spin_lock_init(&port->tx_lock);
INIT_WORK(&port->connect_w, ghsic_data_connect_w);
INIT_WORK(&port->disconnect_w, ghsic_data_disconnect_w);
INIT_WORK(&port->write_tohost_w, ghsic_data_write_tohost);
INIT_WORK(&port->write_tomdm_w, ghsic_data_write_tomdm);
INIT_LIST_HEAD(&port->tx_idle);
INIT_LIST_HEAD(&port->rx_idle);
skb_queue_head_init(&port->tx_skb_q);
skb_queue_head_init(&port->rx_skb_q);
port->gtype = gtype;
port->brdg.name = name;
port->brdg.ctx = port;
port->brdg.ops.send_pkt = ghsic_data_receive;
port->brdg.ops.unthrottle_tx = ghsic_data_unthrottle_tx;
gdata_ports[port_num].port = port;
pdrv = &gdata_ports[port_num].pdrv;
pdrv->probe = ghsic_data_probe;
pdrv->remove = ghsic_data_remove;
pdrv->driver.name = name;
pdrv->driver.owner = THIS_MODULE;
platform_driver_register(pdrv);
pr_debug("%s: port:%p portno:%d\n", __func__, port, port_num);
return 0;
}
void ghsic_data_disconnect(void *gptr, int port_num)
{
struct gdata_port *port;
unsigned long flags;
pr_debug("%s: port#%d\n", __func__, port_num);
if (port_num >= no_data_ports) {
pr_err("%s: invalid portno#%d\n", __func__, port_num);
return;
}
port = gdata_ports[port_num].port;
if (!gptr || !port) {
pr_err("%s: port is null\n", __func__);
return;
}
ghsic_data_free_buffers(port);
/* disable endpoints */
if (port->in) {
usb_ep_disable(port->in);
port->in->driver_data = NULL;
}
if (port->out) {
usb_ep_disable(port->out);
port->out->driver_data = NULL;
}
atomic_set(&port->connected, 0);
spin_lock_irqsave(&port->tx_lock, flags);
port->in = NULL;
port->n_tx_req_queued = 0;
clear_bit(RX_THROTTLED, &port->brdg.flags);
spin_unlock_irqrestore(&port->tx_lock, flags);
spin_lock_irqsave(&port->rx_lock, flags);
port->out = NULL;
clear_bit(TX_THROTTLED, &port->brdg.flags);
spin_unlock_irqrestore(&port->rx_lock, flags);
queue_work(port->wq, &port->disconnect_w);
}
int ghsic_data_connect(void *gptr, int port_num)
{
struct gdata_port *port;
struct gserial *gser;
struct grmnet *gr;
unsigned long flags;
int ret = 0;
pr_debug("%s: port#%d\n", __func__, port_num);
if (port_num >= no_data_ports) {
pr_err("%s: invalid portno#%d\n", __func__, port_num);
return -ENODEV;
}
port = gdata_ports[port_num].port;
if (!gptr || !port) {
pr_err("%s: port is null\n", __func__);
return -ENODEV;
}
if (port->gtype == USB_GADGET_SERIAL) {
gser = gptr;
spin_lock_irqsave(&port->tx_lock, flags);
port->in = gser->in;
spin_unlock_irqrestore(&port->tx_lock, flags);
spin_lock_irqsave(&port->rx_lock, flags);
port->out = gser->out;
spin_unlock_irqrestore(&port->rx_lock, flags);
port->tx_q_size = ghsic_data_serial_tx_q_size;
port->rx_q_size = ghsic_data_serial_rx_q_size;
gser->in->driver_data = port;
gser->out->driver_data = port;
} else {
gr = gptr;
spin_lock_irqsave(&port->tx_lock, flags);
port->in = gr->in;
spin_unlock_irqrestore(&port->tx_lock, flags);
spin_lock_irqsave(&port->rx_lock, flags);
port->out = gr->out;
spin_unlock_irqrestore(&port->rx_lock, flags);
port->tx_q_size = ghsic_data_rmnet_tx_q_size;
port->rx_q_size = ghsic_data_rmnet_rx_q_size;
gr->in->driver_data = port;
gr->out->driver_data = port;
}
ret = usb_ep_enable(port->in);
if (ret) {
pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
__func__, port->in);
goto fail;
}
ret = usb_ep_enable(port->out);
if (ret) {
pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
__func__, port->out);
usb_ep_disable(port->in);
goto fail;
}
atomic_set(&port->connected, 1);
spin_lock_irqsave(&port->tx_lock, flags);
port->to_host = 0;
port->rx_throttled_cnt = 0;
port->rx_unthrottled_cnt = 0;
port->unthrottled_pnd_skbs = 0;
spin_unlock_irqrestore(&port->tx_lock, flags);
spin_lock_irqsave(&port->rx_lock, flags);
port->to_modem = 0;
port->tomodem_drp_cnt = 0;
port->tx_throttled_cnt = 0;
port->tx_unthrottled_cnt = 0;
spin_unlock_irqrestore(&port->rx_lock, flags);
queue_work(port->wq, &port->connect_w);
fail:
return ret;
}
#if defined(CONFIG_DEBUG_FS)
#define DEBUG_DATA_BUF_SIZE 4096
static unsigned int record_timestamp;
module_param(record_timestamp, uint, S_IRUGO | S_IWUSR);
static struct timestamp_buf dbg_data = {
.idx = 0,
.lck = __RW_LOCK_UNLOCKED(lck)
};
/*get_timestamp - returns time of day in us */
static unsigned int get_timestamp(void)
{
struct timeval tval;
unsigned int stamp;
if (!record_timestamp)
return 0;
do_gettimeofday(&tval);
/* 2^32 = 4294967296. Limit to 4096s. */
stamp = tval.tv_sec & 0xFFF;
stamp = stamp * 1000000 + tval.tv_usec;
return stamp;
}
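/*
 * Worked example of the wrap-around above: with tv_sec masked to its low
 * 12 bits the seconds part is at most 4095, so the largest stamp is
 * 4095 * 1000000 + 999999 = 4095999999, which still fits in an unsigned
 * 32-bit value (max 4294967295). The timestamp therefore wraps every
 * 4096 seconds, matching the "Limit to 4096s" comment.
 */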
static void dbg_inc(unsigned *idx)
{
*idx = (*idx + 1) & (DBG_DATA_MAX-1);
}
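/*
 * dbg_inc() advances the ring-buffer index modulo DBG_DATA_MAX; the
 * "& (DBG_DATA_MAX - 1)" form only behaves like "% DBG_DATA_MAX" because
 * DBG_DATA_MAX is assumed to be a power of two.
 */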
/**
* dbg_timestamp - Stores timestamp values of a SKB life cycle
* to debug buffer
* @event: "DL": Downlink Data
* @skb: SKB used to store timestamp values to debug buffer
*/
static void dbg_timestamp(char *event, struct sk_buff * skb)
{
unsigned long flags;
struct timestamp_info *info = (struct timestamp_info *)skb->cb;
if (!record_timestamp)
return;
write_lock_irqsave(&dbg_data.lck, flags);
scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
"%p %u[%s] %u %u %u %u %u %u\n",
skb, skb->len, event, info->created, info->rx_queued,
info->rx_done, info->rx_done_sent, info->tx_queued,
get_timestamp());
dbg_inc(&dbg_data.idx);
write_unlock_irqrestore(&dbg_data.lck, flags);
}
/* show_timestamp: displays the timestamp buffer */
static ssize_t show_timestamp(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
unsigned long flags;
unsigned i;
unsigned j = 0;
char *buf;
int ret = 0;
if (!record_timestamp)
return 0;
buf = kzalloc(sizeof(char) * DEBUG_DATA_BUF_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
read_lock_irqsave(&dbg_data.lck, flags);
i = dbg_data.idx;
for (dbg_inc(&i); i != dbg_data.idx; dbg_inc(&i)) {
if (!strnlen(dbg_data.buf[i], DBG_DATA_MSG))
continue;
j += scnprintf(buf + j, DEBUG_DATA_BUF_SIZE - j,
"%s\n", dbg_data.buf[i]);
}
read_unlock_irqrestore(&dbg_data.lck, flags);
ret = simple_read_from_buffer(ubuf, count, ppos, buf, j);
kfree(buf);
return ret;
}
const struct file_operations gdata_timestamp_ops = {
.read = show_timestamp,
};
static ssize_t ghsic_data_read_stats(struct file *file,
char __user *ubuf, size_t count, loff_t *ppos)
{
struct gdata_port *port;
struct platform_driver *pdrv;
char *buf;
unsigned long flags;
int ret;
int i;
int temp = 0;
buf = kzalloc(sizeof(char) * DEBUG_DATA_BUF_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
for (i = 0; i < no_data_ports; i++) {
port = gdata_ports[i].port;
if (!port)
continue;
pdrv = &gdata_ports[i].pdrv;
spin_lock_irqsave(&port->rx_lock, flags);
temp += scnprintf(buf + temp, DEBUG_DATA_BUF_SIZE - temp,
"\nName: %s\n"
"#PORT:%d port#: %p\n"
"data_ch_open: %d\n"
"data_ch_ready: %d\n"
"\n******UL INFO*****\n\n"
"dpkts_to_modem: %lu\n"
"tomodem_drp_cnt: %u\n"
"rx_buf_len: %u\n"
"tx thld cnt %u\n"
"tx unthld cnt %u\n"
"TX_THROTTLED %d\n",
pdrv->driver.name,
i, port,
test_bit(CH_OPENED, &port->bridge_sts),
test_bit(CH_READY, &port->bridge_sts),
port->to_modem,
port->tomodem_drp_cnt,
port->rx_skb_q.qlen,
port->tx_throttled_cnt,
port->tx_unthrottled_cnt,
test_bit(TX_THROTTLED, &port->brdg.flags));
spin_unlock_irqrestore(&port->rx_lock, flags);
spin_lock_irqsave(&port->tx_lock, flags);
temp += scnprintf(buf + temp, DEBUG_DATA_BUF_SIZE - temp,
"\n******DL INFO******\n\n"
"dpkts_to_usbhost: %lu\n"
"tx_buf_len: %u\n"
"rx thld cnt %u\n"
"rx unthld cnt %u\n"
"uthld pnd skbs %u\n"
"RX_THROTTLED %d\n",
port->to_host,
port->tx_skb_q.qlen,
port->rx_throttled_cnt,
port->rx_unthrottled_cnt,
port->unthrottled_pnd_skbs,
test_bit(RX_THROTTLED, &port->brdg.flags));
spin_unlock_irqrestore(&port->tx_lock, flags);
}
ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
kfree(buf);
return ret;
}
static ssize_t ghsic_data_reset_stats(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
struct gdata_port *port;
int i;
unsigned long flags;
for (i = 0; i < no_data_ports; i++) {
port = gdata_ports[i].port;
if (!port)
continue;
spin_lock_irqsave(&port->rx_lock, flags);
port->to_modem = 0;
port->tomodem_drp_cnt = 0;
port->tx_throttled_cnt = 0;
port->tx_unthrottled_cnt = 0;
spin_unlock_irqrestore(&port->rx_lock, flags);
spin_lock_irqsave(&port->tx_lock, flags);
port->to_host = 0;
port->rx_throttled_cnt = 0;
port->rx_unthrottled_cnt = 0;
port->unthrottled_pnd_skbs = 0;
spin_unlock_irqrestore(&port->tx_lock, flags);
}
return count;
}
const struct file_operations ghsic_stats_ops = {
.read = ghsic_data_read_stats,
.write = ghsic_data_reset_stats,
};
static struct dentry *gdata_dent;
static struct dentry *gdata_dfile_stats;
static struct dentry *gdata_dfile_tstamp;
static void ghsic_data_debugfs_init(void)
{
gdata_dent = debugfs_create_dir("ghsic_data_xport", NULL);
if (!gdata_dent || IS_ERR(gdata_dent))
return;
gdata_dfile_stats = debugfs_create_file("status", 0444, gdata_dent, NULL,
&ghsic_stats_ops);
if (!gdata_dfile_stats || IS_ERR(gdata_dfile_stats)) {
debugfs_remove(gdata_dent);
return;
}
gdata_dfile_tstamp = debugfs_create_file("timestamp", 0644, gdata_dent,
NULL, &gdata_timestamp_ops);
if (!gdata_dfile_tstamp || IS_ERR(gdata_dfile_tstamp))
debugfs_remove(gdata_dent);
}
static void ghsic_data_debugfs_exit(void)
{
debugfs_remove(gdata_dfile_stats);
debugfs_remove(gdata_dfile_tstamp);
debugfs_remove(gdata_dent);
}
#else
static void ghsic_data_debugfs_init(void) { }
static void ghsic_data_debugfs_exit(void) { }
static void dbg_timestamp(char *event, struct sk_buff * skb)
{
return;
}
static unsigned int get_timestamp(void)
{
return 0;
}
#endif
/*portname will be used to find the bridge channel index*/
void ghsic_data_set_port_name(const char *name, const char *xport_type)
{
static unsigned int port_num;
if (port_num >= NUM_PORTS) {
pr_err("%s: setting xport name for invalid port num %d\n",
__func__, port_num);
return;
}
/* if no xport name is passed, default to the xport type, e.g. "hsic" */
if (!name)
strlcpy(gdata_ports[port_num].port_name, xport_type,
BRIDGE_NAME_MAX_LEN);
else
strlcpy(gdata_ports[port_num].port_name, name,
BRIDGE_NAME_MAX_LEN);
/*append _data to get data bridge name: e.g. serial_hsic_data*/
strlcat(gdata_ports[port_num].port_name, "_data", BRIDGE_NAME_MAX_LEN);
port_num++;
}
int ghsic_data_setup(unsigned num_ports, enum gadget_type gtype)
{
int first_port_id = no_data_ports;
int total_num_ports = num_ports + no_data_ports;
int ret = 0;
int i;
if (!num_ports || total_num_ports > NUM_PORTS) {
pr_err("%s: Invalid num of ports count:%d\n",
__func__, num_ports);
return -EINVAL;
}
pr_debug("%s: count: %d\n", __func__, num_ports);
for (i = first_port_id; i < (num_ports + first_port_id); i++) {
/* probe can be called while port_alloc runs, so update no_data_ports first */
no_data_ports++;
ret = ghsic_data_port_alloc(i, gtype);
if (ret) {
no_data_ports--;
pr_err("%s: Unable to alloc port:%d\n", __func__, i);
goto free_ports;
}
}
/*return the starting index*/
return first_port_id;
free_ports:
for (i = first_port_id; i < no_data_ports; i++)
ghsic_data_port_free(i);
no_data_ports = first_port_id;
return ret;
}
static int __init ghsic_data_init(void)
{
ghsic_data_debugfs_init();
return 0;
}
module_init(ghsic_data_init);
static void __exit ghsic_data_exit(void)
{
ghsic_data_debugfs_exit();
}
module_exit(ghsic_data_exit);
MODULE_DESCRIPTION("hsic data xport driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
sandymanu/manufooty_yu_lp | arch/arm/mach-omap2/cm3xxx.c | 2257 | 21481 | /*
* OMAP3xxx CM module functions
*
* Copyright (C) 2009 Nokia Corporation
* Copyright (C) 2008-2010, 2012 Texas Instruments, Inc.
* Paul Walmsley
* Rajendra Nayak <rnayak@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include "soc.h"
#include "iomap.h"
#include "common.h"
#include "prm2xxx_3xxx.h"
#include "cm.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
#include "clockdomain.h"
static const u8 omap3xxx_cm_idlest_offs[] = {
CM_IDLEST1, CM_IDLEST2, OMAP2430_CM_IDLEST3
};
/*
*
*/
static void _write_clktrctrl(u8 c, s16 module, u32 mask)
{
u32 v;
v = omap2_cm_read_mod_reg(module, OMAP2_CM_CLKSTCTRL);
v &= ~mask;
v |= c << __ffs(mask);
omap2_cm_write_mod_reg(v, module, OMAP2_CM_CLKSTCTRL);
}
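/*
 * Illustration of the read-modify-write above: the field selected by @mask
 * is cleared and @c is shifted up to the field's lowest set bit (__ffs(mask)),
 * so with, say, mask = 0x3 and c = 0x1, bits [1:0] of CM_CLKSTCTRL become
 * 0b01 while every other bit keeps its previous value.
 */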
bool omap3xxx_cm_is_clkdm_in_hwsup(s16 module, u32 mask)
{
u32 v;
v = omap2_cm_read_mod_reg(module, OMAP2_CM_CLKSTCTRL);
v &= mask;
v >>= __ffs(mask);
return (v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ? 1 : 0;
}
void omap3xxx_cm_clkdm_enable_hwsup(s16 module, u32 mask)
{
_write_clktrctrl(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, module, mask);
}
void omap3xxx_cm_clkdm_disable_hwsup(s16 module, u32 mask)
{
_write_clktrctrl(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, module, mask);
}
void omap3xxx_cm_clkdm_force_sleep(s16 module, u32 mask)
{
_write_clktrctrl(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, module, mask);
}
void omap3xxx_cm_clkdm_force_wakeup(s16 module, u32 mask)
{
_write_clktrctrl(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, module, mask);
}
/*
*
*/
/**
* omap3xxx_cm_wait_module_ready - wait for a module to leave idle or standby
* @prcm_mod: PRCM module offset
* @idlest_id: CM_IDLESTx register ID (i.e., x = 1, 2, 3)
* @idlest_shift: shift of the bit in the CM_IDLEST* register to check
*
* Wait for the PRCM to indicate that the module identified by
* (@prcm_mod, @idlest_id, @idlest_shift) is clocked. Return 0 upon
* success or -EBUSY if the module doesn't enable in time.
*/
int omap3xxx_cm_wait_module_ready(s16 prcm_mod, u8 idlest_id, u8 idlest_shift)
{
int ena = 0, i = 0;
u8 cm_idlest_reg;
u32 mask;
if (!idlest_id || (idlest_id > ARRAY_SIZE(omap3xxx_cm_idlest_offs)))
return -EINVAL;
cm_idlest_reg = omap3xxx_cm_idlest_offs[idlest_id - 1];
mask = 1 << idlest_shift;
ena = 0;
omap_test_timeout(((omap2_cm_read_mod_reg(prcm_mod, cm_idlest_reg) &
mask) == ena), MAX_MODULE_READY_TIME, i);
return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
}
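/*
 * Hypothetical usage sketch (the shift value below is illustrative only and
 * not taken from the OMAP register map):
 *
 *	if (omap3xxx_cm_wait_module_ready(CORE_MOD, 1, some_idlest_shift))
 *		pr_warn("module did not become ready\n");
 *
 * idlest_id selects CM_IDLEST1/2/3 through omap3xxx_cm_idlest_offs[], and
 * the call polls until the selected bit reads back as 0 or
 * MAX_MODULE_READY_TIME expires.
 */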
/**
* omap3xxx_cm_split_idlest_reg - split CM_IDLEST reg addr into its components
* @idlest_reg: CM_IDLEST* virtual address
* @prcm_inst: pointer to an s16 to return the PRCM instance offset
* @idlest_reg_id: pointer to a u8 to return the CM_IDLESTx register ID
*
* XXX This function is only needed until absolute register addresses are
* removed from the OMAP struct clk records.
*/
int omap3xxx_cm_split_idlest_reg(void __iomem *idlest_reg, s16 *prcm_inst,
u8 *idlest_reg_id)
{
unsigned long offs;
u8 idlest_offs;
int i;
if (idlest_reg < (cm_base + OMAP3430_IVA2_MOD) ||
idlest_reg > (cm_base + 0x1ffff))
return -EINVAL;
idlest_offs = (unsigned long)idlest_reg & 0xff;
for (i = 0; i < ARRAY_SIZE(omap3xxx_cm_idlest_offs); i++) {
if (idlest_offs == omap3xxx_cm_idlest_offs[i]) {
*idlest_reg_id = i + 1;
break;
}
}
if (i == ARRAY_SIZE(omap3xxx_cm_idlest_offs))
return -EINVAL;
offs = idlest_reg - cm_base;
offs &= 0xff00;
*prcm_inst = offs;
return 0;
}
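/*
 * Worked example (the module offset is purely illustrative): if @idlest_reg
 * equals cm_base + 0x0c00 + <offset of CM_IDLEST1>, then idlest_offs matches
 * omap3xxx_cm_idlest_offs[0], *idlest_reg_id is set to 1, and *prcm_inst
 * becomes 0x0c00, i.e. the instance offset with the low byte masked away.
 */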
/* Clockdomain low-level operations */
static int omap3xxx_clkdm_add_sleepdep(struct clockdomain *clkdm1,
struct clockdomain *clkdm2)
{
omap2_cm_set_mod_reg_bits((1 << clkdm2->dep_bit),
clkdm1->pwrdm.ptr->prcm_offs,
OMAP3430_CM_SLEEPDEP);
return 0;
}
static int omap3xxx_clkdm_del_sleepdep(struct clockdomain *clkdm1,
struct clockdomain *clkdm2)
{
omap2_cm_clear_mod_reg_bits((1 << clkdm2->dep_bit),
clkdm1->pwrdm.ptr->prcm_offs,
OMAP3430_CM_SLEEPDEP);
return 0;
}
static int omap3xxx_clkdm_read_sleepdep(struct clockdomain *clkdm1,
struct clockdomain *clkdm2)
{
return omap2_cm_read_mod_bits_shift(clkdm1->pwrdm.ptr->prcm_offs,
OMAP3430_CM_SLEEPDEP,
(1 << clkdm2->dep_bit));
}
static int omap3xxx_clkdm_clear_all_sleepdeps(struct clockdomain *clkdm)
{
struct clkdm_dep *cd;
u32 mask = 0;
for (cd = clkdm->sleepdep_srcs; cd && cd->clkdm_name; cd++) {
if (!cd->clkdm)
continue; /* only happens if data is erroneous */
mask |= 1 << cd->clkdm->dep_bit;
cd->sleepdep_usecount = 0;
}
omap2_cm_clear_mod_reg_bits(mask, clkdm->pwrdm.ptr->prcm_offs,
OMAP3430_CM_SLEEPDEP);
return 0;
}
static int omap3xxx_clkdm_sleep(struct clockdomain *clkdm)
{
omap3xxx_cm_clkdm_force_sleep(clkdm->pwrdm.ptr->prcm_offs,
clkdm->clktrctrl_mask);
return 0;
}
static int omap3xxx_clkdm_wakeup(struct clockdomain *clkdm)
{
omap3xxx_cm_clkdm_force_wakeup(clkdm->pwrdm.ptr->prcm_offs,
clkdm->clktrctrl_mask);
return 0;
}
static void omap3xxx_clkdm_allow_idle(struct clockdomain *clkdm)
{
if (clkdm->usecount > 0)
clkdm_add_autodeps(clkdm);
omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
clkdm->clktrctrl_mask);
}
static void omap3xxx_clkdm_deny_idle(struct clockdomain *clkdm)
{
omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
clkdm->clktrctrl_mask);
if (clkdm->usecount > 0)
clkdm_del_autodeps(clkdm);
}
static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)
{
bool hwsup = false;
if (!clkdm->clktrctrl_mask)
return 0;
/*
* The CLKDM_MISSING_IDLE_REPORTING flag documentation has
* more details on the unpleasant problem this is working
* around
*/
if ((clkdm->flags & CLKDM_MISSING_IDLE_REPORTING) &&
(clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)) {
omap3xxx_clkdm_wakeup(clkdm);
return 0;
}
hwsup = omap3xxx_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
clkdm->clktrctrl_mask);
if (hwsup) {
/* Disable HW transitions when we are changing deps */
omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
clkdm->clktrctrl_mask);
clkdm_add_autodeps(clkdm);
omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
clkdm->clktrctrl_mask);
} else {
if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
omap3xxx_clkdm_wakeup(clkdm);
}
return 0;
}
static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)
{
bool hwsup = false;
if (!clkdm->clktrctrl_mask)
return 0;
/*
* The CLKDM_MISSING_IDLE_REPORTING flag documentation has
* more details on the unpleasant problem this is working
* around
*/
if (clkdm->flags & CLKDM_MISSING_IDLE_REPORTING &&
!(clkdm->flags & CLKDM_CAN_FORCE_SLEEP)) {
omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
clkdm->clktrctrl_mask);
return 0;
}
hwsup = omap3xxx_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
clkdm->clktrctrl_mask);
if (hwsup) {
/* Disable HW transitions when we are changing deps */
omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
clkdm->clktrctrl_mask);
clkdm_del_autodeps(clkdm);
omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
clkdm->clktrctrl_mask);
} else {
if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
omap3xxx_clkdm_sleep(clkdm);
}
return 0;
}
struct clkdm_ops omap3_clkdm_operations = {
.clkdm_add_wkdep = omap2_clkdm_add_wkdep,
.clkdm_del_wkdep = omap2_clkdm_del_wkdep,
.clkdm_read_wkdep = omap2_clkdm_read_wkdep,
.clkdm_clear_all_wkdeps = omap2_clkdm_clear_all_wkdeps,
.clkdm_add_sleepdep = omap3xxx_clkdm_add_sleepdep,
.clkdm_del_sleepdep = omap3xxx_clkdm_del_sleepdep,
.clkdm_read_sleepdep = omap3xxx_clkdm_read_sleepdep,
.clkdm_clear_all_sleepdeps = omap3xxx_clkdm_clear_all_sleepdeps,
.clkdm_sleep = omap3xxx_clkdm_sleep,
.clkdm_wakeup = omap3xxx_clkdm_wakeup,
.clkdm_allow_idle = omap3xxx_clkdm_allow_idle,
.clkdm_deny_idle = omap3xxx_clkdm_deny_idle,
.clkdm_clk_enable = omap3xxx_clkdm_clk_enable,
.clkdm_clk_disable = omap3xxx_clkdm_clk_disable,
};
/*
* Context save/restore code - OMAP3 only
*/
struct omap3_cm_regs {
u32 iva2_cm_clksel1;
u32 iva2_cm_clksel2;
u32 cm_sysconfig;
u32 sgx_cm_clksel;
u32 dss_cm_clksel;
u32 cam_cm_clksel;
u32 per_cm_clksel;
u32 emu_cm_clksel;
u32 emu_cm_clkstctrl;
u32 pll_cm_autoidle;
u32 pll_cm_autoidle2;
u32 pll_cm_clksel4;
u32 pll_cm_clksel5;
u32 pll_cm_clken2;
u32 cm_polctrl;
u32 iva2_cm_fclken;
u32 iva2_cm_clken_pll;
u32 core_cm_fclken1;
u32 core_cm_fclken3;
u32 sgx_cm_fclken;
u32 wkup_cm_fclken;
u32 dss_cm_fclken;
u32 cam_cm_fclken;
u32 per_cm_fclken;
u32 usbhost_cm_fclken;
u32 core_cm_iclken1;
u32 core_cm_iclken2;
u32 core_cm_iclken3;
u32 sgx_cm_iclken;
u32 wkup_cm_iclken;
u32 dss_cm_iclken;
u32 cam_cm_iclken;
u32 per_cm_iclken;
u32 usbhost_cm_iclken;
u32 iva2_cm_autoidle2;
u32 mpu_cm_autoidle2;
u32 iva2_cm_clkstctrl;
u32 mpu_cm_clkstctrl;
u32 core_cm_clkstctrl;
u32 sgx_cm_clkstctrl;
u32 dss_cm_clkstctrl;
u32 cam_cm_clkstctrl;
u32 per_cm_clkstctrl;
u32 neon_cm_clkstctrl;
u32 usbhost_cm_clkstctrl;
u32 core_cm_autoidle1;
u32 core_cm_autoidle2;
u32 core_cm_autoidle3;
u32 wkup_cm_autoidle;
u32 dss_cm_autoidle;
u32 cam_cm_autoidle;
u32 per_cm_autoidle;
u32 usbhost_cm_autoidle;
u32 sgx_cm_sleepdep;
u32 dss_cm_sleepdep;
u32 cam_cm_sleepdep;
u32 per_cm_sleepdep;
u32 usbhost_cm_sleepdep;
u32 cm_clkout_ctrl;
};
static struct omap3_cm_regs cm_context;
void omap3_cm_save_context(void)
{
cm_context.iva2_cm_clksel1 =
omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL1);
cm_context.iva2_cm_clksel2 =
omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL2);
cm_context.cm_sysconfig = __raw_readl(OMAP3430_CM_SYSCONFIG);
cm_context.sgx_cm_clksel =
omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_CLKSEL);
cm_context.dss_cm_clksel =
omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_CLKSEL);
cm_context.cam_cm_clksel =
omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_CLKSEL);
cm_context.per_cm_clksel =
omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_CLKSEL);
cm_context.emu_cm_clksel =
omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, CM_CLKSEL1);
cm_context.emu_cm_clkstctrl =
omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, OMAP2_CM_CLKSTCTRL);
/*
* As per erratum i671, ROM code does not respect the PER DPLL
* programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1.
* In this case, even though this register has been saved in
* scratchpad contents, we need to restore AUTO_PERIPH_DPLL
* by ourselves. So, we need to save it anyway.
*/
cm_context.pll_cm_autoidle =
omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE);
cm_context.pll_cm_autoidle2 =
omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE2);
cm_context.pll_cm_clksel4 =
omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL4);
cm_context.pll_cm_clksel5 =
omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL5);
cm_context.pll_cm_clken2 =
omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKEN2);
cm_context.cm_polctrl = __raw_readl(OMAP3430_CM_POLCTRL);
cm_context.iva2_cm_fclken =
omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_FCLKEN);
cm_context.iva2_cm_clken_pll =
omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL);
cm_context.core_cm_fclken1 =
omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
cm_context.core_cm_fclken3 =
omap2_cm_read_mod_reg(CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
cm_context.sgx_cm_fclken =
omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_FCLKEN);
cm_context.wkup_cm_fclken =
omap2_cm_read_mod_reg(WKUP_MOD, CM_FCLKEN);
cm_context.dss_cm_fclken =
omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_FCLKEN);
cm_context.cam_cm_fclken =
omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_FCLKEN);
cm_context.per_cm_fclken =
omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_FCLKEN);
cm_context.usbhost_cm_fclken =
omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN);
cm_context.core_cm_iclken1 =
omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN1);
cm_context.core_cm_iclken2 =
omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN2);
cm_context.core_cm_iclken3 =
omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN3);
cm_context.sgx_cm_iclken =
omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_ICLKEN);
cm_context.wkup_cm_iclken =
omap2_cm_read_mod_reg(WKUP_MOD, CM_ICLKEN);
cm_context.dss_cm_iclken =
omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_ICLKEN);
cm_context.cam_cm_iclken =
omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_ICLKEN);
cm_context.per_cm_iclken =
omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_ICLKEN);
cm_context.usbhost_cm_iclken =
omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN);
cm_context.iva2_cm_autoidle2 =
omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
cm_context.mpu_cm_autoidle2 =
omap2_cm_read_mod_reg(MPU_MOD, CM_AUTOIDLE2);
cm_context.iva2_cm_clkstctrl =
omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
cm_context.mpu_cm_clkstctrl =
omap2_cm_read_mod_reg(MPU_MOD, OMAP2_CM_CLKSTCTRL);
cm_context.core_cm_clkstctrl =
omap2_cm_read_mod_reg(CORE_MOD, OMAP2_CM_CLKSTCTRL);
cm_context.sgx_cm_clkstctrl =
omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, OMAP2_CM_CLKSTCTRL);
cm_context.dss_cm_clkstctrl =
omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, OMAP2_CM_CLKSTCTRL);
cm_context.cam_cm_clkstctrl =
omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, OMAP2_CM_CLKSTCTRL);
cm_context.per_cm_clkstctrl =
omap2_cm_read_mod_reg(OMAP3430_PER_MOD, OMAP2_CM_CLKSTCTRL);
cm_context.neon_cm_clkstctrl =
omap2_cm_read_mod_reg(OMAP3430_NEON_MOD, OMAP2_CM_CLKSTCTRL);
cm_context.usbhost_cm_clkstctrl =
omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
OMAP2_CM_CLKSTCTRL);
cm_context.core_cm_autoidle1 =
omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE1);
cm_context.core_cm_autoidle2 =
omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE2);
cm_context.core_cm_autoidle3 =
omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE3);
cm_context.wkup_cm_autoidle =
omap2_cm_read_mod_reg(WKUP_MOD, CM_AUTOIDLE);
cm_context.dss_cm_autoidle =
omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_AUTOIDLE);
cm_context.cam_cm_autoidle =
omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_AUTOIDLE);
cm_context.per_cm_autoidle =
omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE);
cm_context.usbhost_cm_autoidle =
omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE);
cm_context.sgx_cm_sleepdep =
omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD,
OMAP3430_CM_SLEEPDEP);
cm_context.dss_cm_sleepdep =
omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, OMAP3430_CM_SLEEPDEP);
cm_context.cam_cm_sleepdep =
omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, OMAP3430_CM_SLEEPDEP);
cm_context.per_cm_sleepdep =
omap2_cm_read_mod_reg(OMAP3430_PER_MOD, OMAP3430_CM_SLEEPDEP);
cm_context.usbhost_cm_sleepdep =
omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
OMAP3430_CM_SLEEPDEP);
cm_context.cm_clkout_ctrl =
omap2_cm_read_mod_reg(OMAP3430_CCR_MOD,
OMAP3_CM_CLKOUT_CTRL_OFFSET);
}
void omap3_cm_restore_context(void)
{
omap2_cm_write_mod_reg(cm_context.iva2_cm_clksel1, OMAP3430_IVA2_MOD,
CM_CLKSEL1);
omap2_cm_write_mod_reg(cm_context.iva2_cm_clksel2, OMAP3430_IVA2_MOD,
CM_CLKSEL2);
__raw_writel(cm_context.cm_sysconfig, OMAP3430_CM_SYSCONFIG);
omap2_cm_write_mod_reg(cm_context.sgx_cm_clksel, OMAP3430ES2_SGX_MOD,
CM_CLKSEL);
omap2_cm_write_mod_reg(cm_context.dss_cm_clksel, OMAP3430_DSS_MOD,
CM_CLKSEL);
omap2_cm_write_mod_reg(cm_context.cam_cm_clksel, OMAP3430_CAM_MOD,
CM_CLKSEL);
omap2_cm_write_mod_reg(cm_context.per_cm_clksel, OMAP3430_PER_MOD,
CM_CLKSEL);
omap2_cm_write_mod_reg(cm_context.emu_cm_clksel, OMAP3430_EMU_MOD,
CM_CLKSEL1);
omap2_cm_write_mod_reg(cm_context.emu_cm_clkstctrl, OMAP3430_EMU_MOD,
OMAP2_CM_CLKSTCTRL);
/*
* As per erratum i671, ROM code does not respect the PER DPLL
* programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1.
* In this case, we need to restore AUTO_PERIPH_DPLL by ourselves.
*/
omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle, PLL_MOD,
CM_AUTOIDLE);
omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle2, PLL_MOD,
CM_AUTOIDLE2);
omap2_cm_write_mod_reg(cm_context.pll_cm_clksel4, PLL_MOD,
OMAP3430ES2_CM_CLKSEL4);
omap2_cm_write_mod_reg(cm_context.pll_cm_clksel5, PLL_MOD,
OMAP3430ES2_CM_CLKSEL5);
omap2_cm_write_mod_reg(cm_context.pll_cm_clken2, PLL_MOD,
OMAP3430ES2_CM_CLKEN2);
__raw_writel(cm_context.cm_polctrl, OMAP3430_CM_POLCTRL);
omap2_cm_write_mod_reg(cm_context.iva2_cm_fclken, OMAP3430_IVA2_MOD,
CM_FCLKEN);
omap2_cm_write_mod_reg(cm_context.iva2_cm_clken_pll, OMAP3430_IVA2_MOD,
OMAP3430_CM_CLKEN_PLL);
omap2_cm_write_mod_reg(cm_context.core_cm_fclken1, CORE_MOD,
CM_FCLKEN1);
omap2_cm_write_mod_reg(cm_context.core_cm_fclken3, CORE_MOD,
OMAP3430ES2_CM_FCLKEN3);
omap2_cm_write_mod_reg(cm_context.sgx_cm_fclken, OMAP3430ES2_SGX_MOD,
CM_FCLKEN);
omap2_cm_write_mod_reg(cm_context.wkup_cm_fclken, WKUP_MOD, CM_FCLKEN);
omap2_cm_write_mod_reg(cm_context.dss_cm_fclken, OMAP3430_DSS_MOD,
CM_FCLKEN);
omap2_cm_write_mod_reg(cm_context.cam_cm_fclken, OMAP3430_CAM_MOD,
CM_FCLKEN);
omap2_cm_write_mod_reg(cm_context.per_cm_fclken, OMAP3430_PER_MOD,
CM_FCLKEN);
omap2_cm_write_mod_reg(cm_context.usbhost_cm_fclken,
OMAP3430ES2_USBHOST_MOD, CM_FCLKEN);
omap2_cm_write_mod_reg(cm_context.core_cm_iclken1, CORE_MOD,
CM_ICLKEN1);
omap2_cm_write_mod_reg(cm_context.core_cm_iclken2, CORE_MOD,
CM_ICLKEN2);
omap2_cm_write_mod_reg(cm_context.core_cm_iclken3, CORE_MOD,
CM_ICLKEN3);
omap2_cm_write_mod_reg(cm_context.sgx_cm_iclken, OMAP3430ES2_SGX_MOD,
CM_ICLKEN);
omap2_cm_write_mod_reg(cm_context.wkup_cm_iclken, WKUP_MOD, CM_ICLKEN);
omap2_cm_write_mod_reg(cm_context.dss_cm_iclken, OMAP3430_DSS_MOD,
CM_ICLKEN);
omap2_cm_write_mod_reg(cm_context.cam_cm_iclken, OMAP3430_CAM_MOD,
CM_ICLKEN);
omap2_cm_write_mod_reg(cm_context.per_cm_iclken, OMAP3430_PER_MOD,
CM_ICLKEN);
omap2_cm_write_mod_reg(cm_context.usbhost_cm_iclken,
OMAP3430ES2_USBHOST_MOD, CM_ICLKEN);
omap2_cm_write_mod_reg(cm_context.iva2_cm_autoidle2, OMAP3430_IVA2_MOD,
CM_AUTOIDLE2);
omap2_cm_write_mod_reg(cm_context.mpu_cm_autoidle2, MPU_MOD,
CM_AUTOIDLE2);
omap2_cm_write_mod_reg(cm_context.iva2_cm_clkstctrl, OMAP3430_IVA2_MOD,
OMAP2_CM_CLKSTCTRL);
omap2_cm_write_mod_reg(cm_context.mpu_cm_clkstctrl, MPU_MOD,
OMAP2_CM_CLKSTCTRL);
omap2_cm_write_mod_reg(cm_context.core_cm_clkstctrl, CORE_MOD,
OMAP2_CM_CLKSTCTRL);
omap2_cm_write_mod_reg(cm_context.sgx_cm_clkstctrl, OMAP3430ES2_SGX_MOD,
OMAP2_CM_CLKSTCTRL);
omap2_cm_write_mod_reg(cm_context.dss_cm_clkstctrl, OMAP3430_DSS_MOD,
OMAP2_CM_CLKSTCTRL);
omap2_cm_write_mod_reg(cm_context.cam_cm_clkstctrl, OMAP3430_CAM_MOD,
OMAP2_CM_CLKSTCTRL);
omap2_cm_write_mod_reg(cm_context.per_cm_clkstctrl, OMAP3430_PER_MOD,
OMAP2_CM_CLKSTCTRL);
omap2_cm_write_mod_reg(cm_context.neon_cm_clkstctrl, OMAP3430_NEON_MOD,
OMAP2_CM_CLKSTCTRL);
omap2_cm_write_mod_reg(cm_context.usbhost_cm_clkstctrl,
OMAP3430ES2_USBHOST_MOD, OMAP2_CM_CLKSTCTRL);
omap2_cm_write_mod_reg(cm_context.core_cm_autoidle1, CORE_MOD,
CM_AUTOIDLE1);
omap2_cm_write_mod_reg(cm_context.core_cm_autoidle2, CORE_MOD,
CM_AUTOIDLE2);
omap2_cm_write_mod_reg(cm_context.core_cm_autoidle3, CORE_MOD,
CM_AUTOIDLE3);
omap2_cm_write_mod_reg(cm_context.wkup_cm_autoidle, WKUP_MOD,
CM_AUTOIDLE);
omap2_cm_write_mod_reg(cm_context.dss_cm_autoidle, OMAP3430_DSS_MOD,
CM_AUTOIDLE);
omap2_cm_write_mod_reg(cm_context.cam_cm_autoidle, OMAP3430_CAM_MOD,
CM_AUTOIDLE);
omap2_cm_write_mod_reg(cm_context.per_cm_autoidle, OMAP3430_PER_MOD,
CM_AUTOIDLE);
omap2_cm_write_mod_reg(cm_context.usbhost_cm_autoidle,
OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE);
omap2_cm_write_mod_reg(cm_context.sgx_cm_sleepdep, OMAP3430ES2_SGX_MOD,
OMAP3430_CM_SLEEPDEP);
omap2_cm_write_mod_reg(cm_context.dss_cm_sleepdep, OMAP3430_DSS_MOD,
OMAP3430_CM_SLEEPDEP);
omap2_cm_write_mod_reg(cm_context.cam_cm_sleepdep, OMAP3430_CAM_MOD,
OMAP3430_CM_SLEEPDEP);
omap2_cm_write_mod_reg(cm_context.per_cm_sleepdep, OMAP3430_PER_MOD,
OMAP3430_CM_SLEEPDEP);
omap2_cm_write_mod_reg(cm_context.usbhost_cm_sleepdep,
OMAP3430ES2_USBHOST_MOD, OMAP3430_CM_SLEEPDEP);
omap2_cm_write_mod_reg(cm_context.cm_clkout_ctrl, OMAP3430_CCR_MOD,
OMAP3_CM_CLKOUT_CTRL_OFFSET);
}
/*
*
*/
static struct cm_ll_data omap3xxx_cm_ll_data = {
.split_idlest_reg = &omap3xxx_cm_split_idlest_reg,
.wait_module_ready = &omap3xxx_cm_wait_module_ready,
};
int __init omap3xxx_cm_init(void)
{
if (!cpu_is_omap34xx())
return 0;
return cm_register(&omap3xxx_cm_ll_data);
}
static void __exit omap3xxx_cm_exit(void)
{
if (!cpu_is_omap34xx())
return;
/* Should never happen */
WARN(cm_unregister(&omap3xxx_cm_ll_data),
"%s: cm_ll_data function pointer mismatch\n", __func__);
}
__exitcall(omap3xxx_cm_exit);
| gpl-2.0 |
ashwinr64/android_kernel_motorola_msm8952 | drivers/regulator/tps6507x-regulator.c | 2257 | 14022 | /*
* tps6507x-regulator.c
*
* Regulator driver for TPS65073 PMIC
*
* Copyright (C) 2009 Texas Instrument Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
* whether express or implied; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/tps6507x.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/mfd/tps6507x.h>
#include <linux/regulator/of_regulator.h>
/* DCDC's */
#define TPS6507X_DCDC_1 0
#define TPS6507X_DCDC_2 1
#define TPS6507X_DCDC_3 2
/* LDOs */
#define TPS6507X_LDO_1 3
#define TPS6507X_LDO_2 4
#define TPS6507X_MAX_REG_ID TPS6507X_LDO_2
/* Number of step-down converters available */
#define TPS6507X_NUM_DCDC 3
/* Number of LDO voltage regulators available */
#define TPS6507X_NUM_LDO 2
/* Number of total regulators available */
#define TPS6507X_NUM_REGULATOR (TPS6507X_NUM_DCDC + TPS6507X_NUM_LDO)
/* Supported voltage values for regulators (in microVolts) */
static const unsigned int VDCDCx_VSEL_table[] = {
725000, 750000, 775000, 800000,
825000, 850000, 875000, 900000,
925000, 950000, 975000, 1000000,
1025000, 1050000, 1075000, 1100000,
1125000, 1150000, 1175000, 1200000,
1225000, 1250000, 1275000, 1300000,
1325000, 1350000, 1375000, 1400000,
1425000, 1450000, 1475000, 1500000,
1550000, 1600000, 1650000, 1700000,
1750000, 1800000, 1850000, 1900000,
1950000, 2000000, 2050000, 2100000,
2150000, 2200000, 2250000, 2300000,
2350000, 2400000, 2450000, 2500000,
2550000, 2600000, 2650000, 2700000,
2750000, 2800000, 2850000, 2900000,
3000000, 3100000, 3200000, 3300000,
};
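/*
 * The regulator core helpers used below (regulator_list_voltage_table and
 * regulator_map_voltage_ascend) treat the array index as the VSEL selector:
 * selector 0 corresponds to 725000 uV and selector 63, the last entry, to
 * 3300000 uV for the DCDC converters.
 */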
static const unsigned int LDO1_VSEL_table[] = {
1000000, 1100000, 1200000, 1250000,
1300000, 1350000, 1400000, 1500000,
1600000, 1800000, 2500000, 2750000,
2800000, 3000000, 3100000, 3300000,
};
/* The voltage mapping table for LDO2 is the same as VDCDCx */
#define LDO2_VSEL_table VDCDCx_VSEL_table
struct tps_info {
const char *name;
u8 table_len;
const unsigned int *table;
/* Does the DCDC high or the low register define the output voltage? */
bool defdcdc_default;
};
static struct tps_info tps6507x_pmic_regs[] = {
{
.name = "VDCDC1",
.table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
.table = VDCDCx_VSEL_table,
},
{
.name = "VDCDC2",
.table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
.table = VDCDCx_VSEL_table,
},
{
.name = "VDCDC3",
.table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
.table = VDCDCx_VSEL_table,
},
{
.name = "LDO1",
.table_len = ARRAY_SIZE(LDO1_VSEL_table),
.table = LDO1_VSEL_table,
},
{
.name = "LDO2",
.table_len = ARRAY_SIZE(LDO2_VSEL_table),
.table = LDO2_VSEL_table,
},
};
struct tps6507x_pmic {
struct regulator_desc desc[TPS6507X_NUM_REGULATOR];
struct tps6507x_dev *mfd;
struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR];
struct tps_info *info[TPS6507X_NUM_REGULATOR];
struct mutex io_lock;
};
static inline int tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg)
{
u8 val;
int err;
err = tps->mfd->read_dev(tps->mfd, reg, 1, &val);
if (err)
return err;
return val;
}
static inline int tps6507x_pmic_write(struct tps6507x_pmic *tps, u8 reg, u8 val)
{
return tps->mfd->write_dev(tps->mfd, reg, 1, &val);
}
static int tps6507x_pmic_set_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask)
{
int err, data;
mutex_lock(&tps->io_lock);
data = tps6507x_pmic_read(tps, reg);
if (data < 0) {
dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
err = data;
goto out;
}
data |= mask;
err = tps6507x_pmic_write(tps, reg, data);
if (err)
dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg);
out:
mutex_unlock(&tps->io_lock);
return err;
}
static int tps6507x_pmic_clear_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask)
{
int err, data;
mutex_lock(&tps->io_lock);
data = tps6507x_pmic_read(tps, reg);
if (data < 0) {
dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
err = data;
goto out;
}
data &= ~mask;
err = tps6507x_pmic_write(tps, reg, data);
if (err)
dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg);
out:
mutex_unlock(&tps->io_lock);
return err;
}
static int tps6507x_pmic_reg_read(struct tps6507x_pmic *tps, u8 reg)
{
int data;
mutex_lock(&tps->io_lock);
data = tps6507x_pmic_read(tps, reg);
if (data < 0)
dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
mutex_unlock(&tps->io_lock);
return data;
}
static int tps6507x_pmic_reg_write(struct tps6507x_pmic *tps, u8 reg, u8 val)
{
int err;
mutex_lock(&tps->io_lock);
err = tps6507x_pmic_write(tps, reg, val);
if (err < 0)
dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg);
mutex_unlock(&tps->io_lock);
return err;
}
static int tps6507x_pmic_is_enabled(struct regulator_dev *dev)
{
struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, rid = rdev_get_id(dev);
u8 shift;
if (rid < TPS6507X_DCDC_1 || rid > TPS6507X_LDO_2)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - rid;
data = tps6507x_pmic_reg_read(tps, TPS6507X_REG_CON_CTRL1);
if (data < 0)
return data;
else
return (data & 1<<shift) ? 1 : 0;
}
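/*
 * The enable bits in TPS6507X_REG_CON_CTRL1 are laid out in reverse
 * regulator-ID order: shift = TPS6507X_MAX_REG_ID - rid, so DCDC1 (id 0)
 * is gated by bit 4 and LDO2 (id 4) by bit 0. tps6507x_pmic_is_enabled()
 * above and the enable/disable helpers below all rely on this mapping.
 */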
static int tps6507x_pmic_enable(struct regulator_dev *dev)
{
struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int rid = rdev_get_id(dev);
u8 shift;
if (rid < TPS6507X_DCDC_1 || rid > TPS6507X_LDO_2)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - rid;
return tps6507x_pmic_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
}
static int tps6507x_pmic_disable(struct regulator_dev *dev)
{
struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int rid = rdev_get_id(dev);
u8 shift;
if (rid < TPS6507X_DCDC_1 || rid > TPS6507X_LDO_2)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - rid;
return tps6507x_pmic_clear_bits(tps, TPS6507X_REG_CON_CTRL1,
1 << shift);
}
static int tps6507x_pmic_get_voltage_sel(struct regulator_dev *dev)
{
struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, rid = rdev_get_id(dev);
u8 reg, mask;
switch (rid) {
case TPS6507X_DCDC_1:
reg = TPS6507X_REG_DEFDCDC1;
mask = TPS6507X_DEFDCDCX_DCDC_MASK;
break;
case TPS6507X_DCDC_2:
if (tps->info[rid]->defdcdc_default)
reg = TPS6507X_REG_DEFDCDC2_HIGH;
else
reg = TPS6507X_REG_DEFDCDC2_LOW;
mask = TPS6507X_DEFDCDCX_DCDC_MASK;
break;
case TPS6507X_DCDC_3:
if (tps->info[rid]->defdcdc_default)
reg = TPS6507X_REG_DEFDCDC3_HIGH;
else
reg = TPS6507X_REG_DEFDCDC3_LOW;
mask = TPS6507X_DEFDCDCX_DCDC_MASK;
break;
case TPS6507X_LDO_1:
reg = TPS6507X_REG_LDO_CTRL1;
mask = TPS6507X_REG_LDO_CTRL1_LDO1_MASK;
break;
case TPS6507X_LDO_2:
reg = TPS6507X_REG_DEFLDO2;
mask = TPS6507X_REG_DEFLDO2_LDO2_MASK;
break;
default:
return -EINVAL;
}
data = tps6507x_pmic_reg_read(tps, reg);
if (data < 0)
return data;
data &= mask;
return data;
}
static int tps6507x_pmic_set_voltage_sel(struct regulator_dev *dev,
unsigned selector)
{
struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, rid = rdev_get_id(dev);
u8 reg, mask;
switch (rid) {
case TPS6507X_DCDC_1:
reg = TPS6507X_REG_DEFDCDC1;
mask = TPS6507X_DEFDCDCX_DCDC_MASK;
break;
case TPS6507X_DCDC_2:
if (tps->info[rid]->defdcdc_default)
reg = TPS6507X_REG_DEFDCDC2_HIGH;
else
reg = TPS6507X_REG_DEFDCDC2_LOW;
mask = TPS6507X_DEFDCDCX_DCDC_MASK;
break;
case TPS6507X_DCDC_3:
if (tps->info[rid]->defdcdc_default)
reg = TPS6507X_REG_DEFDCDC3_HIGH;
else
reg = TPS6507X_REG_DEFDCDC3_LOW;
mask = TPS6507X_DEFDCDCX_DCDC_MASK;
break;
case TPS6507X_LDO_1:
reg = TPS6507X_REG_LDO_CTRL1;
mask = TPS6507X_REG_LDO_CTRL1_LDO1_MASK;
break;
case TPS6507X_LDO_2:
reg = TPS6507X_REG_DEFLDO2;
mask = TPS6507X_REG_DEFLDO2_LDO2_MASK;
break;
default:
return -EINVAL;
}
data = tps6507x_pmic_reg_read(tps, reg);
if (data < 0)
return data;
data &= ~mask;
data |= selector;
return tps6507x_pmic_reg_write(tps, reg, data);
}
static struct regulator_ops tps6507x_pmic_ops = {
.is_enabled = tps6507x_pmic_is_enabled,
.enable = tps6507x_pmic_enable,
.disable = tps6507x_pmic_disable,
.get_voltage_sel = tps6507x_pmic_get_voltage_sel,
.set_voltage_sel = tps6507x_pmic_set_voltage_sel,
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
};
#ifdef CONFIG_OF
static struct of_regulator_match tps6507x_matches[] = {
{ .name = "VDCDC1"},
{ .name = "VDCDC2"},
{ .name = "VDCDC3"},
{ .name = "LDO1"},
{ .name = "LDO2"},
};
static struct tps6507x_board *tps6507x_parse_dt_reg_data(
struct platform_device *pdev,
struct of_regulator_match **tps6507x_reg_matches)
{
struct tps6507x_board *tps_board;
struct device_node *np = pdev->dev.parent->of_node;
struct device_node *regulators;
struct of_regulator_match *matches;
static struct regulator_init_data *reg_data;
int idx = 0, count, ret;
tps_board = devm_kzalloc(&pdev->dev, sizeof(*tps_board),
GFP_KERNEL);
if (!tps_board) {
dev_err(&pdev->dev, "Failure to alloc pdata for regulators.\n");
return NULL;
}
regulators = of_find_node_by_name(np, "regulators");
if (!regulators) {
dev_err(&pdev->dev, "regulator node not found\n");
return NULL;
}
count = ARRAY_SIZE(tps6507x_matches);
matches = tps6507x_matches;
ret = of_regulator_match(&pdev->dev, regulators, matches, count);
if (ret < 0) {
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
ret);
return NULL;
}
*tps6507x_reg_matches = matches;
reg_data = devm_kzalloc(&pdev->dev, (sizeof(struct regulator_init_data)
* TPS6507X_NUM_REGULATOR), GFP_KERNEL);
if (!reg_data) {
dev_err(&pdev->dev, "Failure to alloc init data for regulators.\n");
return NULL;
}
tps_board->tps6507x_pmic_init_data = reg_data;
for (idx = 0; idx < count; idx++) {
if (!matches[idx].init_data || !matches[idx].of_node)
continue;
memcpy(&reg_data[idx], matches[idx].init_data,
sizeof(struct regulator_init_data));
}
return tps_board;
}
#else
static inline struct tps6507x_board *tps6507x_parse_dt_reg_data(
struct platform_device *pdev,
struct of_regulator_match **tps6507x_reg_matches)
{
*tps6507x_reg_matches = NULL;
return NULL;
}
#endif
static int tps6507x_pmic_probe(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
struct tps_info *info = &tps6507x_pmic_regs[0];
struct regulator_config config = { };
struct regulator_init_data *init_data;
struct regulator_dev *rdev;
struct tps6507x_pmic *tps;
struct tps6507x_board *tps_board;
struct of_regulator_match *tps6507x_reg_matches = NULL;
int i;
int error;
unsigned int prop;
/**
* tps_board points to pmic related constants
* coming from the board-evm file.
*/
tps_board = dev_get_platdata(tps6507x_dev->dev);
if (!tps_board && tps6507x_dev->dev->of_node)
tps_board = tps6507x_parse_dt_reg_data(pdev,
&tps6507x_reg_matches);
if (!tps_board)
return -EINVAL;
/**
* init_data points to array of regulator_init structures
* coming from the board-evm file.
*/
init_data = tps_board->tps6507x_pmic_init_data;
if (!init_data)
return -EINVAL;
tps = devm_kzalloc(&pdev->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
return -ENOMEM;
mutex_init(&tps->io_lock);
/* common for all regulators */
tps->mfd = tps6507x_dev;
for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) {
/* Register the regulators */
tps->info[i] = info;
if (init_data->driver_data) {
struct tps6507x_reg_platform_data *data =
init_data->driver_data;
tps->info[i]->defdcdc_default = data->defdcdc_default;
}
tps->desc[i].name = info->name;
tps->desc[i].id = i;
tps->desc[i].n_voltages = info->table_len;
tps->desc[i].volt_table = info->table;
tps->desc[i].ops = &tps6507x_pmic_ops;
tps->desc[i].type = REGULATOR_VOLTAGE;
tps->desc[i].owner = THIS_MODULE;
config.dev = tps6507x_dev->dev;
config.init_data = init_data;
config.driver_data = tps;
if (tps6507x_reg_matches) {
error = of_property_read_u32(
tps6507x_reg_matches[i].of_node,
"ti,defdcdc_default", &prop);
if (!error)
tps->info[i]->defdcdc_default = prop;
config.of_node = tps6507x_reg_matches[i].of_node;
}
rdev = regulator_register(&tps->desc[i], &config);
if (IS_ERR(rdev)) {
dev_err(tps6507x_dev->dev,
"failed to register %s regulator\n",
pdev->name);
error = PTR_ERR(rdev);
goto fail;
}
/* Save regulator for cleanup */
tps->rdev[i] = rdev;
}
tps6507x_dev->pmic = tps;
platform_set_drvdata(pdev, tps6507x_dev);
return 0;
fail:
while (--i >= 0)
regulator_unregister(tps->rdev[i]);
return error;
}
static int tps6507x_pmic_remove(struct platform_device *pdev)
{
struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
struct tps6507x_pmic *tps = tps6507x_dev->pmic;
int i;
for (i = 0; i < TPS6507X_NUM_REGULATOR; i++)
regulator_unregister(tps->rdev[i]);
return 0;
}
static struct platform_driver tps6507x_pmic_driver = {
.driver = {
.name = "tps6507x-pmic",
.owner = THIS_MODULE,
},
.probe = tps6507x_pmic_probe,
.remove = tps6507x_pmic_remove,
};
static int __init tps6507x_pmic_init(void)
{
return platform_driver_register(&tps6507x_pmic_driver);
}
subsys_initcall(tps6507x_pmic_init);
static void __exit tps6507x_pmic_cleanup(void)
{
platform_driver_unregister(&tps6507x_pmic_driver);
}
module_exit(tps6507x_pmic_cleanup);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("TPS6507x voltage regulator driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tps6507x-pmic");
| gpl-2.0 |
knone1/Shamu | drivers/media/tuners/tda9887.c | 2257 | 18621 | #include <linux/module.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/tuner.h>
#include "tuner-i2c.h"
#include "tda9887.h"
/* Chips:
TDA9885 (PAL, NTSC)
TDA9886 (PAL, SECAM, NTSC)
TDA9887 (PAL, SECAM, NTSC, FM Radio)
Used as part of several tuners
*/
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable verbose debug messages");
static DEFINE_MUTEX(tda9887_list_mutex);
static LIST_HEAD(hybrid_tuner_instance_list);
struct tda9887_priv {
struct tuner_i2c_props i2c_props;
struct list_head hybrid_tuner_instance_list;
unsigned char data[4];
unsigned int config;
unsigned int mode;
unsigned int audmode;
v4l2_std_id std;
bool standby;
};
/* ---------------------------------------------------------------------- */
#define UNSET (-1U)
struct tvnorm {
v4l2_std_id std;
char *name;
unsigned char b;
unsigned char c;
unsigned char e;
};
/* ---------------------------------------------------------------------- */
//
// TDA defines
//
//// first reg (b)
#define cVideoTrapBypassOFF 0x00 // bit b0
#define cVideoTrapBypassON 0x01 // bit b0
#define cAutoMuteFmInactive 0x00 // bit b1
#define cAutoMuteFmActive 0x02 // bit b1
#define cIntercarrier 0x00 // bit b2
#define cQSS 0x04 // bit b2
#define cPositiveAmTV 0x00 // bit b3:4
#define cFmRadio 0x08 // bit b3:4
#define cNegativeFmTV 0x10 // bit b3:4
#define cForcedMuteAudioON 0x20 // bit b5
#define cForcedMuteAudioOFF 0x00 // bit b5
#define cOutputPort1Active 0x00 // bit b6
#define cOutputPort1Inactive 0x40 // bit b6
#define cOutputPort2Active 0x00 // bit b7
#define cOutputPort2Inactive 0x80 // bit b7
//// second reg (c)
#define cDeemphasisOFF 0x00 // bit c5
#define cDeemphasisON 0x20 // bit c5
#define cDeemphasis75 0x00 // bit c6
#define cDeemphasis50 0x40 // bit c6
#define cAudioGain0 0x00 // bit c7
#define cAudioGain6 0x80 // bit c7
#define cTopMask 0x1f // bit c0:4
#define cTopDefault 0x10 // bit c0:4
//// third reg (e)
#define cAudioIF_4_5 0x00 // bit e0:1
#define cAudioIF_5_5 0x01 // bit e0:1
#define cAudioIF_6_0 0x02 // bit e0:1
#define cAudioIF_6_5 0x03 // bit e0:1
#define cVideoIFMask 0x1c // bit e2:4
/* Video IF selection in TV Mode (bit B3=0) */
#define cVideoIF_58_75 0x00 // bit e2:4
#define cVideoIF_45_75 0x04 // bit e2:4
#define cVideoIF_38_90 0x08 // bit e2:4
#define cVideoIF_38_00 0x0C // bit e2:4
#define cVideoIF_33_90 0x10 // bit e2:4
#define cVideoIF_33_40 0x14 // bit e2:4
#define cRadioIF_45_75 0x18 // bit e2:4
#define cRadioIF_38_90 0x1C // bit e2:4
/* IF1 selection in Radio Mode (bit B3=1) */
#define cRadioIF_33_30 0x00 // bit e2,4 (also 0x10,0x14)
#define cRadioIF_41_30 0x04 // bit e2,4
/* Output of AFC pin in radio mode when bit E7=1 */
#define cRadioAGC_SIF 0x00 // bit e3
#define cRadioAGC_FM 0x08 // bit e3
#define cTunerGainNormal 0x00 // bit e5
#define cTunerGainLow 0x20 // bit e5
#define cGating_18 0x00 // bit e6
#define cGating_36 0x40 // bit e6
#define cAgcOutON 0x80 // bit e7
#define cAgcOutOFF 0x00 // bit e7
/* ---------------------------------------------------------------------- */
static struct tvnorm tvnorms[] = {
{
.std = V4L2_STD_PAL_BG | V4L2_STD_PAL_H | V4L2_STD_PAL_N,
.name = "PAL-BGHN",
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
cTopDefault),
.e = ( cGating_36 |
cAudioIF_5_5 |
cVideoIF_38_90 ),
},{
.std = V4L2_STD_PAL_I,
.name = "PAL-I",
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_0 |
cVideoIF_38_90 ),
},{
.std = V4L2_STD_PAL_DK,
.name = "PAL-DK",
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_5 |
cVideoIF_38_90 ),
},{
.std = V4L2_STD_PAL_M | V4L2_STD_PAL_Nc,
.name = "PAL-M/Nc",
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis75 |
cTopDefault),
.e = ( cGating_36 |
cAudioIF_4_5 |
cVideoIF_45_75 ),
},{
.std = V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
.name = "SECAM-BGH",
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cTopDefault),
.e = ( cAudioIF_5_5 |
cVideoIF_38_90 ),
},{
.std = V4L2_STD_SECAM_L,
.name = "SECAM-L",
.b = ( cPositiveAmTV |
cQSS ),
.c = ( cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_5 |
cVideoIF_38_90 ),
},{
.std = V4L2_STD_SECAM_LC,
.name = "SECAM-L'",
.b = ( cOutputPort2Inactive |
cPositiveAmTV |
cQSS ),
.c = ( cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_5 |
cVideoIF_33_90 ),
},{
.std = V4L2_STD_SECAM_DK,
.name = "SECAM-DK",
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_5 |
cVideoIF_38_90 ),
},{
.std = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR,
.name = "NTSC-M",
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis75 |
cTopDefault),
.e = ( cGating_36 |
cAudioIF_4_5 |
cVideoIF_45_75 ),
},{
.std = V4L2_STD_NTSC_M_JP,
.name = "NTSC-M-JP",
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
cTopDefault),
.e = ( cGating_36 |
cAudioIF_4_5 |
cVideoIF_58_75 ),
}
};
static struct tvnorm radio_stereo = {
.name = "Radio Stereo",
.b = ( cFmRadio |
cQSS ),
.c = ( cDeemphasisOFF |
cAudioGain6 |
cTopDefault),
.e = ( cTunerGainLow |
cAudioIF_5_5 |
cRadioIF_38_90 ),
};
static struct tvnorm radio_mono = {
.name = "Radio Mono",
.b = ( cFmRadio |
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis75 |
cTopDefault),
.e = ( cTunerGainLow |
cAudioIF_5_5 |
cRadioIF_38_90 ),
};
/* ---------------------------------------------------------------------- */
static void dump_read_message(struct dvb_frontend *fe, unsigned char *buf)
{
struct tda9887_priv *priv = fe->analog_demod_priv;
static char *afc[16] = {
"- 12.5 kHz",
"- 37.5 kHz",
"- 62.5 kHz",
"- 87.5 kHz",
"-112.5 kHz",
"-137.5 kHz",
"-162.5 kHz",
"-187.5 kHz [min]",
"+187.5 kHz [max]",
"+162.5 kHz",
"+137.5 kHz",
"+112.5 kHz",
"+ 87.5 kHz",
"+ 62.5 kHz",
"+ 37.5 kHz",
"+ 12.5 kHz",
};
tuner_info("read: 0x%2x\n", buf[0]);
tuner_info(" after power on : %s\n", (buf[0] & 0x01) ? "yes" : "no");
tuner_info(" afc : %s\n", afc[(buf[0] >> 1) & 0x0f]);
tuner_info(" fmif level : %s\n", (buf[0] & 0x20) ? "high" : "low");
tuner_info(" afc window : %s\n", (buf[0] & 0x40) ? "in" : "out");
tuner_info(" vfi level : %s\n", (buf[0] & 0x80) ? "high" : "low");
}
static void dump_write_message(struct dvb_frontend *fe, unsigned char *buf)
{
struct tda9887_priv *priv = fe->analog_demod_priv;
static char *sound[4] = {
"AM/TV",
"FM/radio",
"FM/TV",
"FM/radio"
};
static char *adjust[32] = {
"-16", "-15", "-14", "-13", "-12", "-11", "-10", "-9",
"-8", "-7", "-6", "-5", "-4", "-3", "-2", "-1",
"0", "+1", "+2", "+3", "+4", "+5", "+6", "+7",
"+8", "+9", "+10", "+11", "+12", "+13", "+14", "+15"
};
static char *deemph[4] = {
"no", "no", "75", "50"
};
static char *carrier[4] = {
"4.5 MHz",
"5.5 MHz",
"6.0 MHz",
"6.5 MHz / AM"
};
static char *vif[8] = {
"58.75 MHz",
"45.75 MHz",
"38.9 MHz",
"38.0 MHz",
"33.9 MHz",
"33.4 MHz",
"45.75 MHz + pin13",
"38.9 MHz + pin13",
};
static char *rif[4] = {
"44 MHz",
"52 MHz",
"52 MHz",
"44 MHz",
};
tuner_info("write: byte B 0x%02x\n", buf[1]);
tuner_info(" B0 video mode : %s\n",
(buf[1] & 0x01) ? "video trap" : "sound trap");
tuner_info(" B1 auto mute fm : %s\n",
(buf[1] & 0x02) ? "yes" : "no");
tuner_info(" B2 carrier mode : %s\n",
(buf[1] & 0x04) ? "QSS" : "Intercarrier");
tuner_info(" B3-4 tv sound/radio : %s\n",
sound[(buf[1] & 0x18) >> 3]);
tuner_info(" B5 force mute audio: %s\n",
(buf[1] & 0x20) ? "yes" : "no");
tuner_info(" B6 output port 1 : %s\n",
(buf[1] & 0x40) ? "high (inactive)" : "low (active)");
tuner_info(" B7 output port 2 : %s\n",
(buf[1] & 0x80) ? "high (inactive)" : "low (active)");
tuner_info("write: byte C 0x%02x\n", buf[2]);
tuner_info(" C0-4 top adjustment : %s dB\n",
adjust[buf[2] & 0x1f]);
tuner_info(" C5-6 de-emphasis : %s\n",
deemph[(buf[2] & 0x60) >> 5]);
tuner_info(" C7 audio gain : %s\n",
(buf[2] & 0x80) ? "-6" : "0");
tuner_info("write: byte E 0x%02x\n", buf[3]);
tuner_info(" E0-1 sound carrier : %s\n",
carrier[(buf[3] & 0x03)]);
tuner_info(" E6 l pll gating : %s\n",
(buf[3] & 0x40) ? "36" : "13");
if (buf[1] & 0x08) {
/* radio */
tuner_info(" E2-4 video if : %s\n",
rif[(buf[3] & 0x0c) >> 2]);
tuner_info(" E7 vif agc output : %s\n",
(buf[3] & 0x80)
? ((buf[3] & 0x10) ? "fm-agc radio" :
"sif-agc radio")
: "fm radio carrier afc");
} else {
/* video */
tuner_info(" E2-4 video if : %s\n",
vif[(buf[3] & 0x1c) >> 2]);
tuner_info(" E5 tuner gain : %s\n",
(buf[3] & 0x80)
? ((buf[3] & 0x20) ? "external" : "normal")
: ((buf[3] & 0x20) ? "minimum" : "normal"));
tuner_info(" E7 vif agc output : %s\n",
(buf[3] & 0x80) ? ((buf[3] & 0x20)
? "pin3 port, pin22 vif agc out"
: "pin22 port, pin3 vif acg ext in")
: "pin3+pin22 port");
}
tuner_info("--\n");
}
/* ---------------------------------------------------------------------- */
static int tda9887_set_tvnorm(struct dvb_frontend *fe)
{
struct tda9887_priv *priv = fe->analog_demod_priv;
struct tvnorm *norm = NULL;
char *buf = priv->data;
int i;
if (priv->mode == V4L2_TUNER_RADIO) {
if (priv->audmode == V4L2_TUNER_MODE_MONO)
norm = &radio_mono;
else
norm = &radio_stereo;
} else {
for (i = 0; i < ARRAY_SIZE(tvnorms); i++) {
if (tvnorms[i].std & priv->std) {
norm = tvnorms+i;
break;
}
}
}
if (NULL == norm) {
tuner_dbg("Unsupported tvnorm entry - audio muted\n");
return -1;
}
tuner_dbg("configure for: %s\n", norm->name);
buf[1] = norm->b;
buf[2] = norm->c;
buf[3] = norm->e;
return 0;
}
static unsigned int port1 = UNSET;
static unsigned int port2 = UNSET;
static unsigned int qss = UNSET;
static unsigned int adjust = UNSET;
module_param(port1, int, 0644);
module_param(port2, int, 0644);
module_param(qss, int, 0644);
module_param(adjust, int, 0644);
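/*
* Illustrative usage (assumption: the exact module name depends on how the
* tuner code is built, e.g. as part of a tda9887/tuner module):
* modprobe <module> port1=1 qss=1 adjust=0x10
* port1/port2/qss override the tvnorm defaults and adjust (0..0x1f) replaces
* the TOP value in byte C, as applied by tda9887_set_insmod() below.
*/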
static int tda9887_set_insmod(struct dvb_frontend *fe)
{
struct tda9887_priv *priv = fe->analog_demod_priv;
char *buf = priv->data;
if (UNSET != port1) {
if (port1)
buf[1] |= cOutputPort1Inactive;
else
buf[1] &= ~cOutputPort1Inactive;
}
if (UNSET != port2) {
if (port2)
buf[1] |= cOutputPort2Inactive;
else
buf[1] &= ~cOutputPort2Inactive;
}
if (UNSET != qss) {
if (qss)
buf[1] |= cQSS;
else
buf[1] &= ~cQSS;
}
if (adjust < 0x20) {
buf[2] &= ~cTopMask;
buf[2] |= adjust;
}
return 0;
}
static int tda9887_do_config(struct dvb_frontend *fe)
{
struct tda9887_priv *priv = fe->analog_demod_priv;
char *buf = priv->data;
if (priv->config & TDA9887_PORT1_ACTIVE)
buf[1] &= ~cOutputPort1Inactive;
if (priv->config & TDA9887_PORT1_INACTIVE)
buf[1] |= cOutputPort1Inactive;
if (priv->config & TDA9887_PORT2_ACTIVE)
buf[1] &= ~cOutputPort2Inactive;
if (priv->config & TDA9887_PORT2_INACTIVE)
buf[1] |= cOutputPort2Inactive;
if (priv->config & TDA9887_QSS)
buf[1] |= cQSS;
if (priv->config & TDA9887_INTERCARRIER)
buf[1] &= ~cQSS;
if (priv->config & TDA9887_AUTOMUTE)
buf[1] |= cAutoMuteFmActive;
if (priv->config & TDA9887_DEEMPHASIS_MASK) {
buf[2] &= ~0x60;
switch (priv->config & TDA9887_DEEMPHASIS_MASK) {
case TDA9887_DEEMPHASIS_NONE:
buf[2] |= cDeemphasisOFF;
break;
case TDA9887_DEEMPHASIS_50:
buf[2] |= cDeemphasisON | cDeemphasis50;
break;
case TDA9887_DEEMPHASIS_75:
buf[2] |= cDeemphasisON | cDeemphasis75;
break;
}
}
if (priv->config & TDA9887_TOP_SET) {
buf[2] &= ~cTopMask;
buf[2] |= (priv->config >> 8) & cTopMask;
}
if ((priv->config & TDA9887_INTERCARRIER_NTSC) &&
(priv->std & V4L2_STD_NTSC))
buf[1] &= ~cQSS;
if (priv->config & TDA9887_GATING_18)
buf[3] &= ~cGating_36;
if (priv->mode == V4L2_TUNER_RADIO) {
if (priv->config & TDA9887_RIF_41_3) {
buf[3] &= ~cVideoIFMask;
buf[3] |= cRadioIF_41_30;
}
if (priv->config & TDA9887_GAIN_NORMAL)
buf[3] &= ~cTunerGainLow;
}
return 0;
}
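/*
* Example (assumption, for illustration only): a board wanting QSS sound and
* a TOP value of 0x10 could pass a config word such as
* TDA9887_QSS | TDA9887_TOP_SET | (0x10 << 8)
* since tda9887_do_config() above takes the TOP bits from (config >> 8) & cTopMask.
*/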
/* ---------------------------------------------------------------------- */
static int tda9887_status(struct dvb_frontend *fe)
{
struct tda9887_priv *priv = fe->analog_demod_priv;
unsigned char buf[1];
int rc;
memset(buf,0,sizeof(buf));
if (1 != (rc = tuner_i2c_xfer_recv(&priv->i2c_props,buf,1)))
tuner_info("i2c i/o error: rc == %d (should be 1)\n", rc);
dump_read_message(fe, buf);
return 0;
}
static void tda9887_configure(struct dvb_frontend *fe)
{
struct tda9887_priv *priv = fe->analog_demod_priv;
int rc;
memset(priv->data,0,sizeof(priv->data));
tda9887_set_tvnorm(fe);
/* A note on the port settings:
These settings tend to depend on the specifics of the board.
By default they are set to inactive (bit value 1) by this driver,
overwriting any changes made by the tvnorm. This means that it
is the responsibility of the module using the tda9887 to set
these values in case of changes in the tvnorm.
In many cases port 2 should be made active (0) when selecting
SECAM-L, and port 2 should remain inactive (1) for SECAM-L'.
For the other standards the tda9887 application note says that
the ports should be set to active (0), but, again, that may
differ depending on the precise hardware configuration.
*/
priv->data[1] |= cOutputPort1Inactive;
priv->data[1] |= cOutputPort2Inactive;
tda9887_do_config(fe);
tda9887_set_insmod(fe);
if (priv->standby)
priv->data[1] |= cForcedMuteAudioON;
tuner_dbg("writing: b=0x%02x c=0x%02x e=0x%02x\n",
priv->data[1], priv->data[2], priv->data[3]);
if (debug > 1)
dump_write_message(fe, priv->data);
if (4 != (rc = tuner_i2c_xfer_send(&priv->i2c_props,priv->data,4)))
tuner_info("i2c i/o error: rc == %d (should be 4)\n", rc);
if (debug > 2) {
msleep_interruptible(1000);
tda9887_status(fe);
}
}
/* ---------------------------------------------------------------------- */
static void tda9887_tuner_status(struct dvb_frontend *fe)
{
struct tda9887_priv *priv = fe->analog_demod_priv;
tuner_info("Data bytes: b=0x%02x c=0x%02x e=0x%02x\n",
priv->data[1], priv->data[2], priv->data[3]);
}
static int tda9887_get_afc(struct dvb_frontend *fe, s32 *afc)
{
struct tda9887_priv *priv = fe->analog_demod_priv;
static const int AFC_BITS_2_kHz[] = {
-12500, -37500, -62500, -97500,
-112500, -137500, -162500, -187500,
187500, 162500, 137500, 112500,
97500 , 62500, 37500 , 12500
};
__u8 reg = 0;
if (priv->mode != V4L2_TUNER_RADIO)
return 0;
if (1 == tuner_i2c_xfer_recv(&priv->i2c_props, &reg, 1))
*afc = AFC_BITS_2_kHz[(reg >> 1) & 0x0f];
return 0;
}
static void tda9887_standby(struct dvb_frontend *fe)
{
struct tda9887_priv *priv = fe->analog_demod_priv;
priv->standby = true;
tda9887_configure(fe);
}
static void tda9887_set_params(struct dvb_frontend *fe,
struct analog_parameters *params)
{
struct tda9887_priv *priv = fe->analog_demod_priv;
priv->standby = false;
priv->mode = params->mode;
priv->audmode = params->audmode;
priv->std = params->std;
tda9887_configure(fe);
}
static int tda9887_set_config(struct dvb_frontend *fe, void *priv_cfg)
{
struct tda9887_priv *priv = fe->analog_demod_priv;
priv->config = *(unsigned int *)priv_cfg;
tda9887_configure(fe);
return 0;
}
static void tda9887_release(struct dvb_frontend *fe)
{
struct tda9887_priv *priv = fe->analog_demod_priv;
mutex_lock(&tda9887_list_mutex);
if (priv)
hybrid_tuner_release_state(priv);
mutex_unlock(&tda9887_list_mutex);
fe->analog_demod_priv = NULL;
}
static struct analog_demod_ops tda9887_ops = {
.info = {
.name = "tda9887",
},
.set_params = tda9887_set_params,
.standby = tda9887_standby,
.tuner_status = tda9887_tuner_status,
.get_afc = tda9887_get_afc,
.release = tda9887_release,
.set_config = tda9887_set_config,
};
struct dvb_frontend *tda9887_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c_adap,
u8 i2c_addr)
{
struct tda9887_priv *priv = NULL;
int instance;
mutex_lock(&tda9887_list_mutex);
instance = hybrid_tuner_request_state(struct tda9887_priv, priv,
hybrid_tuner_instance_list,
i2c_adap, i2c_addr, "tda9887");
switch (instance) {
case 0:
mutex_unlock(&tda9887_list_mutex);
return NULL;
case 1:
fe->analog_demod_priv = priv;
priv->standby = true;
tuner_info("tda988[5/6/7] found\n");
break;
default:
fe->analog_demod_priv = priv;
break;
}
mutex_unlock(&tda9887_list_mutex);
memcpy(&fe->ops.analog_ops, &tda9887_ops,
sizeof(struct analog_demod_ops));
return fe;
}
EXPORT_SYMBOL_GPL(tda9887_attach);
MODULE_LICENSE("GPL");
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
carlocaione/linux-next | arch/um/os-Linux/sigio.c | 2513 | 11677 | /*
* Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pty.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <kern_util.h>
#include <init.h>
#include <os.h>
#include <sigio.h>
#include <um_malloc.h>
/*
* Protected by sigio_lock(), also used by sigio_cleanup, which is an
* exitcall.
*/
static int write_sigio_pid = -1;
static unsigned long write_sigio_stack;
/*
* These arrays are initialized before the sigio thread is started, and
* the descriptors closed after it is killed. So, it can't see them change.
* On the UML side, they are changed under the sigio_lock.
*/
#define SIGIO_FDS_INIT {-1, -1}
static int write_sigio_fds[2] = SIGIO_FDS_INIT;
static int sigio_private[2] = SIGIO_FDS_INIT;
struct pollfds {
struct pollfd *poll;
int size;
int used;
};
/*
* Protected by sigio_lock(). Used by the sigio thread, but the UML thread
* synchronizes with it.
*/
static struct pollfds current_poll;
static struct pollfds next_poll;
static struct pollfds all_sigio_fds;
static int write_sigio_thread(void *unused)
{
struct pollfds *fds, tmp;
struct pollfd *p;
int i, n, respond_fd;
char c;
os_fix_helper_signals();
fds = &current_poll;
while (1) {
n = poll(fds->poll, fds->used, -1);
if (n < 0) {
if (errno == EINTR)
continue;
printk(UM_KERN_ERR "write_sigio_thread : poll returned "
"%d, errno = %d\n", n, errno);
}
for (i = 0; i < fds->used; i++) {
p = &fds->poll[i];
if (p->revents == 0)
continue;
if (p->fd == sigio_private[1]) {
CATCH_EINTR(n = read(sigio_private[1], &c,
sizeof(c)));
if (n != sizeof(c))
printk(UM_KERN_ERR
"write_sigio_thread : "
"read on socket failed, "
"err = %d\n", errno);
tmp = current_poll;
current_poll = next_poll;
next_poll = tmp;
respond_fd = sigio_private[1];
}
else {
respond_fd = write_sigio_fds[1];
fds->used--;
memmove(&fds->poll[i], &fds->poll[i + 1],
(fds->used - i) * sizeof(*fds->poll));
}
CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
if (n != sizeof(c))
printk(UM_KERN_ERR "write_sigio_thread : "
"write on socket failed, err = %d\n",
errno);
}
}
return 0;
}
static int need_poll(struct pollfds *polls, int n)
{
struct pollfd *new;
if (n <= polls->size)
return 0;
new = uml_kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
if (new == NULL) {
printk(UM_KERN_ERR "need_poll : failed to allocate new "
"pollfds\n");
return -ENOMEM;
}
memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
kfree(polls->poll);
polls->poll = new;
polls->size = n;
return 0;
}
/*
* Must be called with sigio_lock held, because it's needed by the marked
* critical section.
*/
static void update_thread(void)
{
unsigned long flags;
int n;
char c;
flags = set_signals(0);
CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c)));
if (n != sizeof(c)) {
printk(UM_KERN_ERR "update_thread : write failed, err = %d\n",
errno);
goto fail;
}
CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
if (n != sizeof(c)) {
printk(UM_KERN_ERR "update_thread : read failed, err = %d\n",
errno);
goto fail;
}
set_signals(flags);
return;
fail:
/* Critical section start */
if (write_sigio_pid != -1) {
os_kill_process(write_sigio_pid, 1);
free_stack(write_sigio_stack, 0);
}
write_sigio_pid = -1;
close(sigio_private[0]);
close(sigio_private[1]);
close(write_sigio_fds[0]);
close(write_sigio_fds[1]);
/* Critical section end */
set_signals(flags);
}
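/*
* Handshake sketch (descriptive comment added for clarity): update_thread()
* writes one byte to sigio_private[0]; the helper thread sees POLLIN on
* sigio_private[1], swaps current_poll and next_poll, and echoes the byte
* back on the same descriptor pair, which update_thread() then reads. Any
* failure in that round trip tears down the thread and both pipe pairs in
* the critical section above.
*/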
int add_sigio_fd(int fd)
{
struct pollfd *p;
int err = 0, i, n;
sigio_lock();
for (i = 0; i < all_sigio_fds.used; i++) {
if (all_sigio_fds.poll[i].fd == fd)
break;
}
if (i == all_sigio_fds.used)
goto out;
p = &all_sigio_fds.poll[i];
for (i = 0; i < current_poll.used; i++) {
if (current_poll.poll[i].fd == fd)
goto out;
}
n = current_poll.used;
err = need_poll(&next_poll, n + 1);
if (err)
goto out;
memcpy(next_poll.poll, current_poll.poll,
current_poll.used * sizeof(struct pollfd));
next_poll.poll[n] = *p;
next_poll.used = n + 1;
update_thread();
out:
sigio_unlock();
return err;
}
int ignore_sigio_fd(int fd)
{
struct pollfd *p;
int err = 0, i, n = 0;
/*
* This is called from exitcalls elsewhere in UML - if
* sigio_cleanup has already run, then update_thread will hang
* or fail because the thread is no longer running.
*/
if (write_sigio_pid == -1)
return -EIO;
sigio_lock();
for (i = 0; i < current_poll.used; i++) {
if (current_poll.poll[i].fd == fd)
break;
}
if (i == current_poll.used)
goto out;
err = need_poll(&next_poll, current_poll.used - 1);
if (err)
goto out;
for (i = 0; i < current_poll.used; i++) {
p = &current_poll.poll[i];
if (p->fd != fd)
next_poll.poll[n++] = *p;
}
next_poll.used = current_poll.used - 1;
update_thread();
out:
sigio_unlock();
return err;
}
static struct pollfd *setup_initial_poll(int fd)
{
struct pollfd *p;
p = uml_kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
if (p == NULL) {
printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
"poll\n");
return NULL;
}
*p = ((struct pollfd) { .fd = fd,
.events = POLLIN,
.revents = 0 });
return p;
}
static void write_sigio_workaround(void)
{
struct pollfd *p;
int err;
int l_write_sigio_fds[2];
int l_sigio_private[2];
int l_write_sigio_pid;
/* We call this *tons* of times - and most of the time we must just fail. */
sigio_lock();
l_write_sigio_pid = write_sigio_pid;
sigio_unlock();
if (l_write_sigio_pid != -1)
return;
err = os_pipe(l_write_sigio_fds, 1, 1);
if (err < 0) {
printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, "
"err = %d\n", -err);
return;
}
err = os_pipe(l_sigio_private, 1, 1);
if (err < 0) {
printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, "
"err = %d\n", -err);
goto out_close1;
}
p = setup_initial_poll(l_sigio_private[1]);
if (!p)
goto out_close2;
sigio_lock();
/*
* Did we race? Don't try to optimize this, please, it's not so likely
* to happen, and no more than once at the boot.
*/
if (write_sigio_pid != -1)
goto out_free;
current_poll = ((struct pollfds) { .poll = p,
.used = 1,
.size = 1 });
if (write_sigio_irq(l_write_sigio_fds[0]))
goto out_clear_poll;
memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));
write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
CLONE_FILES | CLONE_VM,
&write_sigio_stack);
if (write_sigio_pid < 0)
goto out_clear;
sigio_unlock();
return;
out_clear:
write_sigio_pid = -1;
write_sigio_fds[0] = -1;
write_sigio_fds[1] = -1;
sigio_private[0] = -1;
sigio_private[1] = -1;
out_clear_poll:
current_poll = ((struct pollfds) { .poll = NULL,
.size = 0,
.used = 0 });
out_free:
sigio_unlock();
kfree(p);
out_close2:
close(l_sigio_private[0]);
close(l_sigio_private[1]);
out_close1:
close(l_write_sigio_fds[0]);
close(l_write_sigio_fds[1]);
}
void sigio_broken(int fd, int read)
{
int err;
write_sigio_workaround();
sigio_lock();
err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
if (err) {
printk(UM_KERN_ERR "maybe_sigio_broken - failed to add pollfd "
"for descriptor %d\n", fd);
goto out;
}
all_sigio_fds.poll[all_sigio_fds.used++] =
((struct pollfd) { .fd = fd,
.events = read ? POLLIN : POLLOUT,
.revents = 0 });
out:
sigio_unlock();
}
/* Changed during early boot */
static int pty_output_sigio;
static int pty_close_sigio;
void maybe_sigio_broken(int fd, int read)
{
if (!isatty(fd))
return;
if ((read || pty_output_sigio) && (!read || pty_close_sigio))
return;
sigio_broken(fd, read);
}
static void sigio_cleanup(void)
{
if (write_sigio_pid == -1)
return;
os_kill_process(write_sigio_pid, 1);
free_stack(write_sigio_stack, 0);
write_sigio_pid = -1;
}
__uml_exitcall(sigio_cleanup);
/* Used as a flag during SIGIO testing early in boot */
static int got_sigio;
static void __init handler(int sig)
{
got_sigio = 1;
}
struct openpty_arg {
int master;
int slave;
int err;
};
static void openpty_cb(void *arg)
{
struct openpty_arg *info = arg;
info->err = 0;
if (openpty(&info->master, &info->slave, NULL, NULL, NULL))
info->err = -errno;
}
static int async_pty(int master, int slave)
{
int flags;
flags = fcntl(master, F_GETFL);
if (flags < 0)
return -errno;
if ((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
(fcntl(master, F_SETOWN, os_getpid()) < 0))
return -errno;
if ((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0))
return -errno;
return 0;
}
static void __init check_one_sigio(void (*proc)(int, int))
{
struct sigaction old, new;
struct openpty_arg pty = { .master = -1, .slave = -1 };
int master, slave, err;
initial_thread_cb(openpty_cb, &pty);
if (pty.err) {
printk(UM_KERN_ERR "check_one_sigio failed, errno = %d\n",
-pty.err);
return;
}
master = pty.master;
slave = pty.slave;
if ((master == -1) || (slave == -1)) {
printk(UM_KERN_ERR "check_one_sigio failed to allocate a "
"pty\n");
return;
}
/* Not now, but complain so we know where we failed. */
err = raw(master);
if (err < 0) {
printk(UM_KERN_ERR "check_one_sigio : raw failed, errno = %d\n",
-err);
return;
}
err = async_pty(master, slave);
if (err < 0) {
printk(UM_KERN_ERR "check_one_sigio : sigio_async failed, "
"err = %d\n", -err);
return;
}
if (sigaction(SIGIO, NULL, &old) < 0) {
printk(UM_KERN_ERR "check_one_sigio : sigaction 1 failed, "
"errno = %d\n", errno);
return;
}
new = old;
new.sa_handler = handler;
if (sigaction(SIGIO, &new, NULL) < 0) {
printk(UM_KERN_ERR "check_one_sigio : sigaction 2 failed, "
"errno = %d\n", errno);
return;
}
got_sigio = 0;
(*proc)(master, slave);
close(master);
close(slave);
if (sigaction(SIGIO, &old, NULL) < 0)
printk(UM_KERN_ERR "check_one_sigio : sigaction 3 failed, "
"errno = %d\n", errno);
}
static void tty_output(int master, int slave)
{
int n;
char buf[512];
printk(UM_KERN_INFO "Checking that host ptys support output SIGIO...");
memset(buf, 0, sizeof(buf));
while (write(master, buf, sizeof(buf)) > 0) ;
if (errno != EAGAIN)
printk(UM_KERN_ERR "tty_output : write failed, errno = %d\n",
errno);
while (((n = read(slave, buf, sizeof(buf))) > 0) &&
!({ barrier(); got_sigio; }))
;
if (got_sigio) {
printk(UM_KERN_CONT "Yes\n");
pty_output_sigio = 1;
} else if (n == -EAGAIN)
printk(UM_KERN_CONT "No, enabling workaround\n");
else
printk(UM_KERN_CONT "tty_output : read failed, err = %d\n", n);
}
static void tty_close(int master, int slave)
{
printk(UM_KERN_INFO "Checking that host ptys support SIGIO on "
"close...");
close(slave);
if (got_sigio) {
printk(UM_KERN_CONT "Yes\n");
pty_close_sigio = 1;
} else
printk(UM_KERN_CONT "No, enabling workaround\n");
}
static void __init check_sigio(void)
{
if ((access("/dev/ptmx", R_OK) < 0) &&
(access("/dev/ptyp0", R_OK) < 0)) {
printk(UM_KERN_WARNING "No pseudo-terminals available - "
"skipping pty SIGIO check\n");
return;
}
check_one_sigio(tty_output);
check_one_sigio(tty_close);
}
/* Here because it only does the SIGIO testing for now */
void __init os_check_bugs(void)
{
check_sigio();
}
| gpl-2.0 |
kularny/GeniSys.Kernel | drivers/watchdog/sbc7240_wdt.c | 2513 | 7470 | /*
* NANO7240 SBC Watchdog device driver
*
* Based on w83877f.c by Scott Jennings,
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* (c) Copyright 2007 Gilles GIGAN <gilles.gigan@jcu.edu.au>
*
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <asm/system.h>
#define SBC7240_PREFIX "sbc7240_wdt: "
#define SBC7240_ENABLE_PORT 0x443
#define SBC7240_DISABLE_PORT 0x043
#define SBC7240_SET_TIMEOUT_PORT SBC7240_ENABLE_PORT
#define SBC7240_MAGIC_CHAR 'V'
#define SBC7240_TIMEOUT 30
#define SBC7240_MAX_TIMEOUT 255
static int timeout = SBC7240_TIMEOUT; /* in seconds */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. (1<=timeout<="
__MODULE_STRING(SBC7240_MAX_TIMEOUT) ", default="
__MODULE_STRING(SBC7240_TIMEOUT) ")");
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Disable watchdog when closing device file");
#define SBC7240_OPEN_STATUS_BIT 0
#define SBC7240_ENABLED_STATUS_BIT 1
#define SBC7240_EXPECT_CLOSE_STATUS_BIT 2
static unsigned long wdt_status;
/*
* Utility routines
*/
static void wdt_disable(void)
{
/* disable the watchdog */
if (test_and_clear_bit(SBC7240_ENABLED_STATUS_BIT, &wdt_status)) {
inb_p(SBC7240_DISABLE_PORT);
printk(KERN_INFO SBC7240_PREFIX
"Watchdog timer is now disabled.\n");
}
}
static void wdt_enable(void)
{
/* enable the watchdog */
if (!test_and_set_bit(SBC7240_ENABLED_STATUS_BIT, &wdt_status)) {
inb_p(SBC7240_ENABLE_PORT);
printk(KERN_INFO SBC7240_PREFIX
"Watchdog timer is now enabled.\n");
}
}
static int wdt_set_timeout(int t)
{
if (t < 1 || t > SBC7240_MAX_TIMEOUT) {
printk(KERN_ERR SBC7240_PREFIX
"timeout value must be 1<=x<=%d\n",
SBC7240_MAX_TIMEOUT);
return -1;
}
/* set the timeout */
outb_p((unsigned)t, SBC7240_SET_TIMEOUT_PORT);
timeout = t;
printk(KERN_INFO SBC7240_PREFIX "timeout set to %d seconds\n", t);
return 0;
}
/* Whack the dog */
static inline void wdt_keepalive(void)
{
if (test_bit(SBC7240_ENABLED_STATUS_BIT, &wdt_status))
inb_p(SBC7240_ENABLE_PORT);
}
/*
* /dev/watchdog handling
*/
static ssize_t fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
size_t i;
char c;
if (count) {
if (!nowayout) {
clear_bit(SBC7240_EXPECT_CLOSE_STATUS_BIT,
&wdt_status);
/* is there a magic char ? */
for (i = 0; i != count; i++) {
if (get_user(c, buf + i))
return -EFAULT;
if (c == SBC7240_MAGIC_CHAR) {
set_bit(SBC7240_EXPECT_CLOSE_STATUS_BIT,
&wdt_status);
break;
}
}
}
wdt_keepalive();
}
return count;
}
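/*
* Illustrative userspace interaction (assumption, not part of this driver):
* a daemon keeps the board alive by writing to /dev/watchdog at least once
* per 'timeout' seconds; writing the magic character 'V' before close()
* disarms the watchdog on release, and is only honoured when nowayout=0:
*
* int fd = open("/dev/watchdog", O_WRONLY);
* write(fd, "V", 1); (SBC7240_MAGIC_CHAR, see fop_close() below)
* close(fd);
*/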
static int fop_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(SBC7240_OPEN_STATUS_BIT, &wdt_status))
return -EBUSY;
wdt_enable();
return nonseekable_open(inode, file);
}
static int fop_close(struct inode *inode, struct file *file)
{
if (test_and_clear_bit(SBC7240_EXPECT_CLOSE_STATUS_BIT, &wdt_status)
|| !nowayout) {
wdt_disable();
} else {
printk(KERN_CRIT SBC7240_PREFIX
"Unexpected close, not stopping watchdog!\n");
wdt_keepalive();
}
clear_bit(SBC7240_OPEN_STATUS_BIT, &wdt_status);
return 0;
}
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING|
WDIOF_SETTIMEOUT|
WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "SBC7240",
};
static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user((void __user *)arg, &ident, sizeof(ident))
? -EFAULT : 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, (int __user *)arg);
case WDIOC_SETOPTIONS:
{
int options;
int retval = -EINVAL;
if (get_user(options, (int __user *)arg))
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
wdt_disable();
retval = 0;
}
if (options & WDIOS_ENABLECARD) {
wdt_enable();
retval = 0;
}
return retval;
}
case WDIOC_KEEPALIVE:
wdt_keepalive();
return 0;
case WDIOC_SETTIMEOUT:
{
int new_timeout;
if (get_user(new_timeout, (int __user *)arg))
return -EFAULT;
if (wdt_set_timeout(new_timeout))
return -EINVAL;
/* Fall through */
}
case WDIOC_GETTIMEOUT:
return put_user(timeout, (int __user *)arg);
default:
return -ENOTTY;
}
}
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = fop_write,
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
};
static struct miscdevice wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &wdt_fops,
};
/*
* Notifier for system down
*/
static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
wdt_disable();
return NOTIFY_DONE;
}
static struct notifier_block wdt_notifier = {
.notifier_call = wdt_notify_sys,
};
static void __exit sbc7240_wdt_unload(void)
{
printk(KERN_INFO SBC7240_PREFIX "Removing watchdog\n");
misc_deregister(&wdt_miscdev);
unregister_reboot_notifier(&wdt_notifier);
release_region(SBC7240_ENABLE_PORT, 1);
}
static int __init sbc7240_wdt_init(void)
{
int rc = -EBUSY;
if (!request_region(SBC7240_ENABLE_PORT, 1, "SBC7240 WDT")) {
printk(KERN_ERR SBC7240_PREFIX
"I/O address 0x%04x already in use\n",
SBC7240_ENABLE_PORT);
rc = -EIO;
goto err_out;
}
/* The IO port 0x043 used to disable the watchdog
* is already claimed by the system timer, so we
* can't request_region() it ...*/
if (timeout < 1 || timeout > SBC7240_MAX_TIMEOUT) {
timeout = SBC7240_TIMEOUT;
printk(KERN_INFO SBC7240_PREFIX
"timeout value must be 1<=x<=%d, using %d\n",
SBC7240_MAX_TIMEOUT, timeout);
}
wdt_set_timeout(timeout);
wdt_disable();
rc = register_reboot_notifier(&wdt_notifier);
if (rc) {
printk(KERN_ERR SBC7240_PREFIX
"cannot register reboot notifier (err=%d)\n", rc);
goto err_out_region;
}
rc = misc_register(&wdt_miscdev);
if (rc) {
printk(KERN_ERR SBC7240_PREFIX
"cannot register miscdev on minor=%d (err=%d)\n",
wdt_miscdev.minor, rc);
goto err_out_reboot_notifier;
}
printk(KERN_INFO SBC7240_PREFIX
"Watchdog driver for SBC7240 initialised (nowayout=%d)\n",
nowayout);
return 0;
err_out_reboot_notifier:
unregister_reboot_notifier(&wdt_notifier);
err_out_region:
release_region(SBC7240_ENABLE_PORT, 1);
err_out:
return rc;
}
module_init(sbc7240_wdt_init);
module_exit(sbc7240_wdt_unload);
MODULE_AUTHOR("Gilles Gigan");
MODULE_DESCRIPTION("Watchdog device driver for single board"
" computers EPIC Nano 7240 from iEi");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
| gpl-2.0 |
J-Team/android_kernel_samsung_u8500 | net/mac80211/debugfs_sta.c | 2769 | 11728 | /*
* Copyright 2003-2005 Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/debugfs.h>
#include <linux/ieee80211.h>
#include "ieee80211_i.h"
#include "debugfs.h"
#include "debugfs_sta.h"
#include "sta_info.h"
/* sta attributtes */
#define STA_READ(name, field, format_string) \
static ssize_t sta_ ##name## _read(struct file *file, \
char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
struct sta_info *sta = file->private_data; \
return mac80211_format_buffer(userbuf, count, ppos, \
format_string, sta->field); \
}
#define STA_READ_D(name, field) STA_READ(name, field, "%d\n")
#define STA_READ_U(name, field) STA_READ(name, field, "%u\n")
#define STA_READ_S(name, field) STA_READ(name, field, "%s\n")
#define STA_OPS(name) \
static const struct file_operations sta_ ##name## _ops = { \
.read = sta_##name##_read, \
.open = mac80211_open_file_generic, \
.llseek = generic_file_llseek, \
}
#define STA_OPS_RW(name) \
static const struct file_operations sta_ ##name## _ops = { \
.read = sta_##name##_read, \
.write = sta_##name##_write, \
.open = mac80211_open_file_generic, \
.llseek = generic_file_llseek, \
}
#define STA_FILE(name, field, format) \
STA_READ_##format(name, field) \
STA_OPS(name)
STA_FILE(aid, sta.aid, D);
STA_FILE(dev, sdata->name, S);
STA_FILE(last_signal, last_signal, D);
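/*
* Note (added for clarity): each STA_FILE() line above expands STA_READ_* and
* STA_OPS into a sta_<name>_read() helper plus a matching sta_<name>_ops
* file_operations; e.g. STA_FILE(aid, sta.aid, D) yields sta_aid_read(),
* which prints sta->sta.aid with "%d\n", and sta_aid_ops.
*/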
static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
char buf[100];
struct sta_info *sta = file->private_data;
u32 staflags = get_sta_flags(sta);
int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s",
staflags & WLAN_STA_AUTH ? "AUTH\n" : "",
staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
staflags & WLAN_STA_PS_STA ? "PS (sta)\n" : "",
staflags & WLAN_STA_PS_DRIVER ? "PS (driver)\n" : "",
staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
staflags & WLAN_STA_WME ? "WME\n" : "",
staflags & WLAN_STA_WDS ? "WDS\n" : "",
staflags & WLAN_STA_MFP ? "MFP\n" : "");
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
STA_OPS(flags);
static ssize_t sta_num_ps_buf_frames_read(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
{
struct sta_info *sta = file->private_data;
return mac80211_format_buffer(userbuf, count, ppos, "%u\n",
skb_queue_len(&sta->ps_tx_buf));
}
STA_OPS(num_ps_buf_frames);
static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct sta_info *sta = file->private_data;
return mac80211_format_buffer(userbuf, count, ppos, "%d\n",
jiffies_to_msecs(jiffies - sta->last_rx));
}
STA_OPS(inactive_ms);
static ssize_t sta_connected_time_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct sta_info *sta = file->private_data;
struct timespec uptime;
struct tm result;
long connected_time_secs;
char buf[100];
int res;
do_posix_clock_monotonic_gettime(&uptime);
connected_time_secs = uptime.tv_sec - sta->last_connected;
time_to_tm(connected_time_secs, 0, &result);
result.tm_year -= 70;
result.tm_mday -= 1;
res = scnprintf(buf, sizeof(buf),
"years - %ld\nmonths - %d\ndays - %d\nclock - %d:%d:%d\n\n",
result.tm_year, result.tm_mon, result.tm_mday,
result.tm_hour, result.tm_min, result.tm_sec);
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
STA_OPS(connected_time);
static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
char buf[15*NUM_RX_DATA_QUEUES], *p = buf;
int i;
struct sta_info *sta = file->private_data;
for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
p += scnprintf(p, sizeof(buf)+buf-p, "%x ",
le16_to_cpu(sta->last_seq_ctrl[i]));
p += scnprintf(p, sizeof(buf)+buf-p, "\n");
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
STA_OPS(last_seq_ctrl);
static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
char buf[71 + STA_TID_NUM * 40], *p = buf;
int i;
struct sta_info *sta = file->private_data;
struct tid_ampdu_rx *tid_rx;
struct tid_ampdu_tx *tid_tx;
rcu_read_lock();
p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
sta->ampdu_mlme.dialog_token_allocator + 1);
p += scnprintf(p, sizeof(buf) + buf - p,
"TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
for (i = 0; i < STA_TID_NUM; i++) {
tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]);
tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]);
p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", !!tid_rx);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
tid_rx ? tid_rx->dialog_token : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
tid_rx ? tid_rx->ssn : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", !!tid_tx);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
tid_tx ? tid_tx->dialog_token : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d",
tid_tx ? skb_queue_len(&tid_tx->pending) : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\n");
}
rcu_read_unlock();
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
char _buf[12], *buf = _buf;
struct sta_info *sta = file->private_data;
bool start, tx;
unsigned long tid;
int ret;
if (count > sizeof(_buf))
return -EINVAL;
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[sizeof(_buf) - 1] = '\0';
if (strncmp(buf, "tx ", 3) == 0) {
buf += 3;
tx = true;
} else if (strncmp(buf, "rx ", 3) == 0) {
buf += 3;
tx = false;
} else
return -EINVAL;
if (strncmp(buf, "start ", 6) == 0) {
buf += 6;
start = true;
if (!tx)
return -EINVAL;
} else if (strncmp(buf, "stop ", 5) == 0) {
buf += 5;
start = false;
} else
return -EINVAL;
tid = simple_strtoul(buf, NULL, 0);
if (tid >= STA_TID_NUM)
return -EINVAL;
if (tx) {
if (start)
ret = ieee80211_start_tx_ba_session(&sta->sta, tid, 5000);
else
ret = ieee80211_stop_tx_ba_session(&sta->sta, tid);
} else {
__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
3, true);
ret = 0;
}
return ret ?: count;
}
STA_OPS_RW(agg_status);
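/*
* Example commands accepted by the agg_status write handler above (syntax
* taken from sta_agg_status_write(); the debugfs path is an assumption and
* depends on where debugfs is mounted and on the interface/station names):
*
* echo "tx start 0" > /sys/kernel/debug/ieee80211/phy0/netdev:wlan0/stations/<mac>/agg_status
* echo "tx stop 0" > .../agg_status
* echo "rx stop 0" > .../agg_status
*
* "rx start" is rejected, and the TID must be below STA_TID_NUM.
*/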
static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
#define PRINT_HT_CAP(_cond, _str) \
do { \
if (_cond) \
p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
} while (0)
char buf[512], *p = buf;
int i;
struct sta_info *sta = file->private_data;
struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
htc->ht_supported ? "" : "not ");
if (htc->ht_supported) {
p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap);
PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC");
PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40");
PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20");
PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save");
PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save");
PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled");
PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield");
PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI");
PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI");
PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC");
PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC");
PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream");
PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams");
PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams");
PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
"3839 bytes");
PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
"7935 bytes");
/*
* For beacons and probe response this would mean the BSS
* does or does not allow the usage of DSSS/CCK HT40.
* Otherwise it means the STA does or does not use
* DSSS/CCK HT40.
*/
PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40");
PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40");
/* BIT(13) is reserved */
PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant");
PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection");
p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n",
htc->ampdu_factor, htc->ampdu_density);
p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:");
for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
p += scnprintf(p, sizeof(buf)+buf-p, " %.2x",
htc->mcs.rx_mask[i]);
p += scnprintf(p, sizeof(buf)+buf-p, "\n");
/* If not set this is meaningless */
if (le16_to_cpu(htc->mcs.rx_highest)) {
p += scnprintf(p, sizeof(buf)+buf-p,
"MCS rx highest: %d Mbps\n",
le16_to_cpu(htc->mcs.rx_highest));
}
p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n",
htc->mcs.tx_params);
}
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
STA_OPS(ht_capa);
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, \
sta->debugfs.dir, sta, &sta_ ##name## _ops);
#define DEBUGFS_ADD_COUNTER(name, field) \
if (sizeof(sta->field) == sizeof(u32)) \
debugfs_create_u32(#name, 0400, sta->debugfs.dir, \
(u32 *) &sta->field); \
else \
debugfs_create_u64(#name, 0400, sta->debugfs.dir, \
(u64 *) &sta->field);
void ieee80211_sta_debugfs_add(struct sta_info *sta)
{
struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations;
u8 mac[3*ETH_ALEN];
sta->debugfs.add_has_run = true;
if (!stations_dir)
return;
snprintf(mac, sizeof(mac), "%pM", sta->sta.addr);
/*
* This might fail due to a race condition:
* When mac80211 unlinks a station, the debugfs entries
* remain, but it is already possible to link a new
* station with the same address which triggers adding
* it to debugfs; therefore, if the old station isn't
* destroyed quickly enough the old station's debugfs
* dir might still be around.
*/
sta->debugfs.dir = debugfs_create_dir(mac, stations_dir);
if (!sta->debugfs.dir)
return;
DEBUGFS_ADD(flags);
DEBUGFS_ADD(num_ps_buf_frames);
DEBUGFS_ADD(inactive_ms);
DEBUGFS_ADD(connected_time);
DEBUGFS_ADD(last_seq_ctrl);
DEBUGFS_ADD(agg_status);
DEBUGFS_ADD(dev);
DEBUGFS_ADD(last_signal);
DEBUGFS_ADD(ht_capa);
DEBUGFS_ADD_COUNTER(rx_packets, rx_packets);
DEBUGFS_ADD_COUNTER(tx_packets, tx_packets);
DEBUGFS_ADD_COUNTER(rx_bytes, rx_bytes);
DEBUGFS_ADD_COUNTER(tx_bytes, tx_bytes);
DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates);
DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments);
DEBUGFS_ADD_COUNTER(rx_dropped, rx_dropped);
DEBUGFS_ADD_COUNTER(tx_fragments, tx_fragments);
DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count);
DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed);
DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count);
}
void ieee80211_sta_debugfs_remove(struct sta_info *sta)
{
debugfs_remove_recursive(sta->debugfs.dir);
sta->debugfs.dir = NULL;
}
| gpl-2.0 |
NooNameR/Dirty | drivers/isdn/hisax/elsa_ser.c | 3025 | 17010 | /* $Id: elsa_ser.c,v 2.14.2.3 2004/02/11 13:21:33 keil Exp $
*
* stuff for the serial modem on ELSA cards
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#define MAX_MODEM_BUF 256
#define WAKEUP_CHARS (MAX_MODEM_BUF/2)
#define RS_ISR_PASS_LIMIT 256
#define BASE_BAUD ( 1843200 / 16 )
//#define SERIAL_DEBUG_OPEN 1
//#define SERIAL_DEBUG_INTR 1
//#define SERIAL_DEBUG_FLOW 1
#undef SERIAL_DEBUG_OPEN
#undef SERIAL_DEBUG_INTR
#undef SERIAL_DEBUG_FLOW
#undef SERIAL_DEBUG_REG
//#define SERIAL_DEBUG_REG 1
#ifdef SERIAL_DEBUG_REG
static u_char deb[32];
const char *ModemIn[] = {"RBR","IER","IIR","LCR","MCR","LSR","MSR","SCR"};
const char *ModemOut[] = {"THR","IER","FCR","LCR","MCR","LSR","MSR","SCR"};
#endif
static char *MInit_1 = "AT&F&C1E0&D2\r\0";
static char *MInit_2 = "ATL2M1S64=13\r\0";
static char *MInit_3 = "AT+FCLASS=0\r\0";
static char *MInit_4 = "ATV1S2=128X1\r\0";
static char *MInit_5 = "AT\\V8\\N3\r\0";
static char *MInit_6 = "ATL0M0&G0%E1\r\0";
static char *MInit_7 = "AT%L1%M0%C3\r\0";
static char *MInit_speed28800 = "AT%G0%B28800\r\0";
static char *MInit_dialout = "ATs7=60 x1 d\r\0";
static char *MInit_dialin = "ATs7=60 x1 a\r\0";
static inline unsigned int serial_in(struct IsdnCardState *cs, int offset)
{
#ifdef SERIAL_DEBUG_REG
u_int val = inb(cs->hw.elsa.base + 8 + offset);
debugl1(cs,"in %s %02x",ModemIn[offset], val);
return(val);
#else
return inb(cs->hw.elsa.base + 8 + offset);
#endif
}
static inline unsigned int serial_inp(struct IsdnCardState *cs, int offset)
{
#ifdef SERIAL_DEBUG_REG
#ifdef ELSA_SERIAL_NOPAUSE_IO
u_int val = inb(cs->hw.elsa.base + 8 + offset);
debugl1(cs,"inp %s %02x",ModemIn[offset], val);
#else
u_int val = inb_p(cs->hw.elsa.base + 8 + offset);
debugl1(cs,"inP %s %02x",ModemIn[offset], val);
#endif
return(val);
#else
#ifdef ELSA_SERIAL_NOPAUSE_IO
return inb(cs->hw.elsa.base + 8 + offset);
#else
return inb_p(cs->hw.elsa.base + 8 + offset);
#endif
#endif
}
static inline void serial_out(struct IsdnCardState *cs, int offset, int value)
{
#ifdef SERIAL_DEBUG_REG
debugl1(cs,"out %s %02x",ModemOut[offset], value);
#endif
outb(value, cs->hw.elsa.base + 8 + offset);
}
static inline void serial_outp(struct IsdnCardState *cs, int offset,
int value)
{
#ifdef SERIAL_DEBUG_REG
#ifdef ELSA_SERIAL_NOPAUSE_IO
debugl1(cs,"outp %s %02x",ModemOut[offset], value);
#else
debugl1(cs,"outP %s %02x",ModemOut[offset], value);
#endif
#endif
#ifdef ELSA_SERIAL_NOPAUSE_IO
outb(value, cs->hw.elsa.base + 8 + offset);
#else
outb_p(value, cs->hw.elsa.base + 8 + offset);
#endif
}
/*
* This routine is called to set the UART divisor registers to match
* the specified baud rate for a serial port.
*/
static void change_speed(struct IsdnCardState *cs, int baud)
{
int quot = 0, baud_base;
unsigned cval, fcr = 0;
/* byte size and parity */
cval = 0x03;
/* Determine divisor based on baud rate */
baud_base = BASE_BAUD;
quot = baud_base / baud;
/* If the quotient is ever zero, default to 9600 bps */
if (!quot)
quot = baud_base / 9600;
/* Set up FIFO's */
if ((baud_base / quot) < 2400)
fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
else
fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_8;
serial_outp(cs, UART_FCR, fcr);
/* CTS flow control flag and modem status interrupts */
cs->hw.elsa.IER &= ~UART_IER_MSI;
cs->hw.elsa.IER |= UART_IER_MSI;
serial_outp(cs, UART_IER, cs->hw.elsa.IER);
debugl1(cs,"modem quot=0x%x", quot);
serial_outp(cs, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
serial_outp(cs, UART_DLL, quot & 0xff); /* LS of divisor */
serial_outp(cs, UART_DLM, quot >> 8); /* MS of divisor */
serial_outp(cs, UART_LCR, cval); /* reset DLAB */
serial_inp(cs, UART_RX);
}
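/*
* Worked example (added for clarity): with BASE_BAUD = 1843200/16 = 115200,
* a request for 9600 baud gives quot = 115200/9600 = 12, so 0x0c goes into
* UART_DLL and 0 into UART_DLM; mstartup() below always asks for BASE_BAUD
* itself, i.e. quot = 1.
*/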
static int mstartup(struct IsdnCardState *cs)
{
int retval=0;
/*
* Clear the FIFO buffers and disable them
* (they will be reenabled in change_speed())
*/
serial_outp(cs, UART_FCR, (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT));
/*
* At this point there's no way the LSR could still be 0xFF;
* if it is, then bail out, because there's likely no UART
* here.
*/
if (serial_inp(cs, UART_LSR) == 0xff) {
retval = -ENODEV;
goto errout;
}
/*
* Clear the interrupt registers.
*/
(void) serial_inp(cs, UART_RX);
(void) serial_inp(cs, UART_IIR);
(void) serial_inp(cs, UART_MSR);
/*
* Now, initialize the UART
*/
serial_outp(cs, UART_LCR, UART_LCR_WLEN8); /* reset DLAB */
cs->hw.elsa.MCR = 0;
cs->hw.elsa.MCR = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2;
serial_outp(cs, UART_MCR, cs->hw.elsa.MCR);
/*
* Finally, enable interrupts
*/
cs->hw.elsa.IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
serial_outp(cs, UART_IER, cs->hw.elsa.IER); /* enable interrupts */
/*
* And clear the interrupt registers again for luck.
*/
(void)serial_inp(cs, UART_LSR);
(void)serial_inp(cs, UART_RX);
(void)serial_inp(cs, UART_IIR);
(void)serial_inp(cs, UART_MSR);
cs->hw.elsa.transcnt = cs->hw.elsa.transp = 0;
cs->hw.elsa.rcvcnt = cs->hw.elsa.rcvp =0;
/*
* and set the speed of the serial port
*/
change_speed(cs, BASE_BAUD);
cs->hw.elsa.MFlag = 1;
errout:
return retval;
}
/*
* This routine will shutdown a serial port; interrupts are disabled, and
* DTR is dropped if the hangup on close termio flag is on.
*/
static void mshutdown(struct IsdnCardState *cs)
{
#ifdef SERIAL_DEBUG_OPEN
printk(KERN_DEBUG"Shutting down serial ....");
#endif
/*
* clear delta_msr_wait queue to avoid mem leaks: we may free the irq
* here so the queue might never be woken up
*/
cs->hw.elsa.IER = 0;
serial_outp(cs, UART_IER, 0x00); /* disable all intrs */
cs->hw.elsa.MCR &= ~UART_MCR_OUT2;
/* disable break condition */
serial_outp(cs, UART_LCR, serial_inp(cs, UART_LCR) & ~UART_LCR_SBC);
cs->hw.elsa.MCR &= ~(UART_MCR_DTR|UART_MCR_RTS);
serial_outp(cs, UART_MCR, cs->hw.elsa.MCR);
/* disable FIFO's */
serial_outp(cs, UART_FCR, (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT));
serial_inp(cs, UART_RX); /* read data port to reset things */
#ifdef SERIAL_DEBUG_OPEN
printk(" done\n");
#endif
}
static inline int
write_modem(struct BCState *bcs) {
int ret=0;
struct IsdnCardState *cs = bcs->cs;
int count, len, fp;
if (!bcs->tx_skb)
return 0;
if (bcs->tx_skb->len <= 0)
return 0;
len = bcs->tx_skb->len;
if (len > MAX_MODEM_BUF - cs->hw.elsa.transcnt)
len = MAX_MODEM_BUF - cs->hw.elsa.transcnt;
fp = cs->hw.elsa.transcnt + cs->hw.elsa.transp;
fp &= (MAX_MODEM_BUF -1);
count = len;
if (count > MAX_MODEM_BUF - fp) {
count = MAX_MODEM_BUF - fp;
skb_copy_from_linear_data(bcs->tx_skb,
cs->hw.elsa.transbuf + fp, count);
skb_pull(bcs->tx_skb, count);
cs->hw.elsa.transcnt += count;
ret = count;
count = len - count;
fp = 0;
}
skb_copy_from_linear_data(bcs->tx_skb,
cs->hw.elsa.transbuf + fp, count);
skb_pull(bcs->tx_skb, count);
cs->hw.elsa.transcnt += count;
ret += count;
if (cs->hw.elsa.transcnt &&
!(cs->hw.elsa.IER & UART_IER_THRI)) {
cs->hw.elsa.IER |= UART_IER_THRI;
serial_outp(cs, UART_IER, cs->hw.elsa.IER);
}
return(ret);
}
static inline void
modem_fill(struct BCState *bcs) {
if (bcs->tx_skb) {
if (bcs->tx_skb->len) {
write_modem(bcs);
return;
} else {
if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
(PACKET_NOACK != bcs->tx_skb->pkt_type)) {
u_long flags;
spin_lock_irqsave(&bcs->aclock, flags);
bcs->ackcnt += bcs->hw.hscx.count;
spin_unlock_irqrestore(&bcs->aclock, flags);
schedule_event(bcs, B_ACKPENDING);
}
dev_kfree_skb_any(bcs->tx_skb);
bcs->tx_skb = NULL;
}
}
if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
bcs->hw.hscx.count = 0;
test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
write_modem(bcs);
} else {
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
schedule_event(bcs, B_XMTBUFREADY);
}
}
static inline void receive_chars(struct IsdnCardState *cs,
int *status)
{
unsigned char ch;
struct sk_buff *skb;
do {
ch = serial_in(cs, UART_RX);
if (cs->hw.elsa.rcvcnt >= MAX_MODEM_BUF)
break;
cs->hw.elsa.rcvbuf[cs->hw.elsa.rcvcnt++] = ch;
#ifdef SERIAL_DEBUG_INTR
printk("DR%02x:%02x...", ch, *status);
#endif
if (*status & (UART_LSR_BI | UART_LSR_PE |
UART_LSR_FE | UART_LSR_OE)) {
#ifdef SERIAL_DEBUG_INTR
printk("handling exept....");
#endif
}
*status = serial_inp(cs, UART_LSR);
} while (*status & UART_LSR_DR);
if (cs->hw.elsa.MFlag == 2) {
if (!(skb = dev_alloc_skb(cs->hw.elsa.rcvcnt)))
printk(KERN_WARNING "ElsaSER: receive out of memory\n");
else {
memcpy(skb_put(skb, cs->hw.elsa.rcvcnt), cs->hw.elsa.rcvbuf,
cs->hw.elsa.rcvcnt);
skb_queue_tail(& cs->hw.elsa.bcs->rqueue, skb);
}
schedule_event(cs->hw.elsa.bcs, B_RCVBUFREADY);
} else {
char tmp[128];
char *t = tmp;
t += sprintf(t, "modem read cnt %d", cs->hw.elsa.rcvcnt);
QuickHex(t, cs->hw.elsa.rcvbuf, cs->hw.elsa.rcvcnt);
debugl1(cs, tmp);
}
cs->hw.elsa.rcvcnt = 0;
}
static inline void transmit_chars(struct IsdnCardState *cs, int *intr_done)
{
int count;
debugl1(cs, "transmit_chars: p(%x) cnt(%x)", cs->hw.elsa.transp,
cs->hw.elsa.transcnt);
if (cs->hw.elsa.transcnt <= 0) {
cs->hw.elsa.IER &= ~UART_IER_THRI;
serial_out(cs, UART_IER, cs->hw.elsa.IER);
return;
}
count = 16;
do {
serial_outp(cs, UART_TX, cs->hw.elsa.transbuf[cs->hw.elsa.transp++]);
if (cs->hw.elsa.transp >= MAX_MODEM_BUF)
cs->hw.elsa.transp=0;
if (--cs->hw.elsa.transcnt <= 0)
break;
} while (--count > 0);
if ((cs->hw.elsa.transcnt < WAKEUP_CHARS) && (cs->hw.elsa.MFlag==2))
modem_fill(cs->hw.elsa.bcs);
#ifdef SERIAL_DEBUG_INTR
printk("THRE...");
#endif
if (intr_done)
*intr_done = 0;
if (cs->hw.elsa.transcnt <= 0) {
cs->hw.elsa.IER &= ~UART_IER_THRI;
serial_outp(cs, UART_IER, cs->hw.elsa.IER);
}
}
static void rs_interrupt_elsa(struct IsdnCardState *cs)
{
int status, iir, msr;
int pass_counter = 0;
#ifdef SERIAL_DEBUG_INTR
printk(KERN_DEBUG "rs_interrupt_single(%d)...", cs->irq);
#endif
do {
status = serial_inp(cs, UART_LSR);
debugl1(cs,"rs LSR %02x", status);
#ifdef SERIAL_DEBUG_INTR
printk("status = %x...", status);
#endif
if (status & UART_LSR_DR)
receive_chars(cs, &status);
if (status & UART_LSR_THRE)
transmit_chars(cs, NULL);
if (pass_counter++ > RS_ISR_PASS_LIMIT) {
printk("rs_single loop break.\n");
break;
}
iir = serial_inp(cs, UART_IIR);
debugl1(cs,"rs IIR %02x", iir);
if ((iir & 0xf) == 0) {
msr = serial_inp(cs, UART_MSR);
debugl1(cs,"rs MSR %02x", msr);
}
} while (!(iir & UART_IIR_NO_INT));
#ifdef SERIAL_DEBUG_INTR
printk("end.\n");
#endif
}
extern int open_hscxstate(struct IsdnCardState *cs, struct BCState *bcs);
extern void modehscx(struct BCState *bcs, int mode, int bc);
extern void hscx_l2l1(struct PStack *st, int pr, void *arg);
static void
close_elsastate(struct BCState *bcs)
{
modehscx(bcs, 0, bcs->channel);
if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
if (bcs->hw.hscx.rcvbuf) {
if (bcs->mode != L1_MODE_MODEM)
kfree(bcs->hw.hscx.rcvbuf);
bcs->hw.hscx.rcvbuf = NULL;
}
skb_queue_purge(&bcs->rqueue);
skb_queue_purge(&bcs->squeue);
if (bcs->tx_skb) {
dev_kfree_skb_any(bcs->tx_skb);
bcs->tx_skb = NULL;
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
}
}
}
static void
modem_write_cmd(struct IsdnCardState *cs, u_char *buf, int len) {
int count, fp;
u_char *msg = buf;
if (!len)
return;
if (len > (MAX_MODEM_BUF - cs->hw.elsa.transcnt)) {
return;
}
fp = cs->hw.elsa.transcnt + cs->hw.elsa.transp;
fp &= (MAX_MODEM_BUF -1);
count = len;
if (count > MAX_MODEM_BUF - fp) {
count = MAX_MODEM_BUF - fp;
memcpy(cs->hw.elsa.transbuf + fp, msg, count);
cs->hw.elsa.transcnt += count;
msg += count;
count = len - count;
fp = 0;
}
memcpy(cs->hw.elsa.transbuf + fp, msg, count);
cs->hw.elsa.transcnt += count;
if (cs->hw.elsa.transcnt &&
!(cs->hw.elsa.IER & UART_IER_THRI)) {
cs->hw.elsa.IER |= UART_IER_THRI;
serial_outp(cs, UART_IER, cs->hw.elsa.IER);
}
}
static void
modem_set_init(struct IsdnCardState *cs) {
int timeout;
#define RCV_DELAY 20
modem_write_cmd(cs, MInit_1, strlen(MInit_1));
timeout = 1000;
while(timeout-- && cs->hw.elsa.transcnt)
udelay(1000);
debugl1(cs, "msi tout=%d", timeout);
mdelay(RCV_DELAY);
modem_write_cmd(cs, MInit_2, strlen(MInit_2));
timeout = 1000;
while(timeout-- && cs->hw.elsa.transcnt)
udelay(1000);
debugl1(cs, "msi tout=%d", timeout);
mdelay(RCV_DELAY);
modem_write_cmd(cs, MInit_3, strlen(MInit_3));
timeout = 1000;
while(timeout-- && cs->hw.elsa.transcnt)
udelay(1000);
debugl1(cs, "msi tout=%d", timeout);
mdelay(RCV_DELAY);
modem_write_cmd(cs, MInit_4, strlen(MInit_4));
timeout = 1000;
while(timeout-- && cs->hw.elsa.transcnt)
udelay(1000);
debugl1(cs, "msi tout=%d", timeout);
mdelay(RCV_DELAY);
modem_write_cmd(cs, MInit_5, strlen(MInit_5));
timeout = 1000;
while(timeout-- && cs->hw.elsa.transcnt)
udelay(1000);
debugl1(cs, "msi tout=%d", timeout);
mdelay(RCV_DELAY);
modem_write_cmd(cs, MInit_6, strlen(MInit_6));
timeout = 1000;
while(timeout-- && cs->hw.elsa.transcnt)
udelay(1000);
debugl1(cs, "msi tout=%d", timeout);
mdelay(RCV_DELAY);
modem_write_cmd(cs, MInit_7, strlen(MInit_7));
timeout = 1000;
while(timeout-- && cs->hw.elsa.transcnt)
udelay(1000);
debugl1(cs, "msi tout=%d", timeout);
mdelay(RCV_DELAY);
}
static void
modem_set_dial(struct IsdnCardState *cs, int outgoing) {
int timeout;
#define RCV_DELAY 20
modem_write_cmd(cs, MInit_speed28800, strlen(MInit_speed28800));
timeout = 1000;
while(timeout-- && cs->hw.elsa.transcnt)
udelay(1000);
debugl1(cs, "msi tout=%d", timeout);
mdelay(RCV_DELAY);
if (outgoing)
modem_write_cmd(cs, MInit_dialout, strlen(MInit_dialout));
else
modem_write_cmd(cs, MInit_dialin, strlen(MInit_dialin));
timeout = 1000;
while(timeout-- && cs->hw.elsa.transcnt)
udelay(1000);
debugl1(cs, "msi tout=%d", timeout);
mdelay(RCV_DELAY);
}
static void
modem_l2l1(struct PStack *st, int pr, void *arg)
{
struct BCState *bcs = st->l1.bcs;
struct sk_buff *skb = arg;
u_long flags;
if (pr == (PH_DATA | REQUEST)) {
spin_lock_irqsave(&bcs->cs->lock, flags);
if (bcs->tx_skb) {
skb_queue_tail(&bcs->squeue, skb);
} else {
bcs->tx_skb = skb;
test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
bcs->hw.hscx.count = 0;
write_modem(bcs);
}
spin_unlock_irqrestore(&bcs->cs->lock, flags);
} else if (pr == (PH_ACTIVATE | REQUEST)) {
test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
set_arcofi(bcs->cs, st->l1.bc);
mstartup(bcs->cs);
modem_set_dial(bcs->cs, test_bit(FLG_ORIG, &st->l2.flag));
bcs->cs->hw.elsa.MFlag=2;
} else if (pr == (PH_DEACTIVATE | REQUEST)) {
test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
bcs->cs->dc.isac.arcofi_bc = st->l1.bc;
arcofi_fsm(bcs->cs, ARCOFI_START, &ARCOFI_XOP_0);
interruptible_sleep_on(&bcs->cs->dc.isac.arcofi_wait);
bcs->cs->hw.elsa.MFlag=1;
} else {
printk(KERN_WARNING"ElsaSer: unknown pr %x\n", pr);
}
}
static int
setstack_elsa(struct PStack *st, struct BCState *bcs)
{
bcs->channel = st->l1.bc;
switch (st->l1.mode) {
case L1_MODE_HDLC:
case L1_MODE_TRANS:
if (open_hscxstate(st->l1.hardware, bcs))
return (-1);
st->l2.l2l1 = hscx_l2l1;
break;
case L1_MODE_MODEM:
bcs->mode = L1_MODE_MODEM;
if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
bcs->hw.hscx.rcvbuf = bcs->cs->hw.elsa.rcvbuf;
skb_queue_head_init(&bcs->rqueue);
skb_queue_head_init(&bcs->squeue);
}
bcs->tx_skb = NULL;
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
bcs->event = 0;
bcs->hw.hscx.rcvidx = 0;
bcs->tx_cnt = 0;
bcs->cs->hw.elsa.bcs = bcs;
st->l2.l2l1 = modem_l2l1;
break;
}
st->l1.bcs = bcs;
setstack_manager(st);
bcs->st = st;
setstack_l1_B(st);
return (0);
}
static void
init_modem(struct IsdnCardState *cs) {
cs->bcs[0].BC_SetStack = setstack_elsa;
cs->bcs[1].BC_SetStack = setstack_elsa;
cs->bcs[0].BC_Close = close_elsastate;
cs->bcs[1].BC_Close = close_elsastate;
if (!(cs->hw.elsa.rcvbuf = kmalloc(MAX_MODEM_BUF,
GFP_ATOMIC))) {
printk(KERN_WARNING
"Elsa: No modem mem hw.elsa.rcvbuf\n");
return;
}
if (!(cs->hw.elsa.transbuf = kmalloc(MAX_MODEM_BUF,
GFP_ATOMIC))) {
printk(KERN_WARNING
"Elsa: No modem mem hw.elsa.transbuf\n");
kfree(cs->hw.elsa.rcvbuf);
cs->hw.elsa.rcvbuf = NULL;
return;
}
if (mstartup(cs)) {
printk(KERN_WARNING "Elsa: problem startup modem\n");
}
modem_set_init(cs);
}
static void
release_modem(struct IsdnCardState *cs) {
cs->hw.elsa.MFlag = 0;
if (cs->hw.elsa.transbuf) {
if (cs->hw.elsa.rcvbuf) {
mshutdown(cs);
kfree(cs->hw.elsa.rcvbuf);
cs->hw.elsa.rcvbuf = NULL;
}
kfree(cs->hw.elsa.transbuf);
cs->hw.elsa.transbuf = NULL;
}
}
| gpl-2.0 |
Luquidtester/DirtyKernel-3.4.76 | kernel/power/fbearlysuspend.c | 3793 | 4455 | /* kernel/power/fbearlysuspend.c
*
* Copyright (C) 2005-2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/earlysuspend.h>
#include <linux/module.h>
#include <linux/wait.h>
#include "power.h"
#define MAX_BUF 100
static wait_queue_head_t fb_state_wq;
static int display = 1;
static DEFINE_SPINLOCK(fb_state_lock);
static enum {
FB_STATE_STOPPED_DRAWING,
FB_STATE_REQUEST_STOP_DRAWING,
FB_STATE_DRAWING_OK,
} fb_state;
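/*
 * fb_state implements a small handshake with userspace: early suspend
 * sets REQUEST_STOP_DRAWING, a reader of wait_for_fb_wake acknowledges
 * it by moving the state to STOPPED_DRAWING, and late resume returns
 * it to DRAWING_OK so blocked readers of wait_for_fb_wake are released.
 */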
/* tell userspace to stop drawing, wait for it to stop */
static void stop_drawing_early_suspend(struct early_suspend *h)
{
int ret;
unsigned long irq_flags;
spin_lock_irqsave(&fb_state_lock, irq_flags);
fb_state = FB_STATE_REQUEST_STOP_DRAWING;
spin_unlock_irqrestore(&fb_state_lock, irq_flags);
wake_up_all(&fb_state_wq);
ret = wait_event_timeout(fb_state_wq,
fb_state == FB_STATE_STOPPED_DRAWING,
HZ);
if (unlikely(fb_state != FB_STATE_STOPPED_DRAWING))
pr_warning("stop_drawing_early_suspend: timeout waiting for "
"userspace to stop drawing\n");
}
/* tell userspace to start drawing */
static void start_drawing_late_resume(struct early_suspend *h)
{
unsigned long irq_flags;
spin_lock_irqsave(&fb_state_lock, irq_flags);
fb_state = FB_STATE_DRAWING_OK;
spin_unlock_irqrestore(&fb_state_lock, irq_flags);
wake_up(&fb_state_wq);
}
static struct early_suspend stop_drawing_early_suspend_desc = {
.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
.suspend = stop_drawing_early_suspend,
.resume = start_drawing_late_resume,
};
static ssize_t wait_for_fb_sleep_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char *s = buf;
int ret;
ret = wait_event_interruptible(fb_state_wq,
fb_state != FB_STATE_DRAWING_OK);
if (ret && fb_state == FB_STATE_DRAWING_OK) {
return ret;
} else {
s += sprintf(buf, "sleeping");
if (display == 1) {
display = 0;
sysfs_notify(power_kobj, NULL, "wait_for_fb_status");
}
}
return s - buf;
}
static ssize_t wait_for_fb_wake_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char *s = buf;
int ret;
unsigned long irq_flags;
spin_lock_irqsave(&fb_state_lock, irq_flags);
if (fb_state == FB_STATE_REQUEST_STOP_DRAWING) {
fb_state = FB_STATE_STOPPED_DRAWING;
wake_up(&fb_state_wq);
}
spin_unlock_irqrestore(&fb_state_lock, irq_flags);
ret = wait_event_interruptible(fb_state_wq,
fb_state == FB_STATE_DRAWING_OK);
if (ret && fb_state != FB_STATE_DRAWING_OK)
return ret;
else {
s += sprintf(buf, "awake");
if (display == 0) {
display = 1;
sysfs_notify(power_kobj, NULL, "wait_for_fb_status");
}
}
return s - buf;
}
static ssize_t wait_for_fb_status_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int ret = 0;
if (display == 1)
ret = snprintf(buf, strnlen("on", MAX_BUF) + 1, "on");
else
ret = snprintf(buf, strnlen("off", MAX_BUF) + 1, "off");
return ret;
}
#define power_ro_attr(_name) \
static struct kobj_attribute _name##_attr = { \
.attr = { \
.name = __stringify(_name), \
.mode = 0444, \
}, \
.show = _name##_show, \
.store = NULL, \
}
power_ro_attr(wait_for_fb_sleep);
power_ro_attr(wait_for_fb_wake);
power_ro_attr(wait_for_fb_status);
static struct attribute *g[] = {
&wait_for_fb_sleep_attr.attr,
&wait_for_fb_wake_attr.attr,
&wait_for_fb_status_attr.attr,
NULL,
};
static struct attribute_group attr_group = {
.attrs = g,
};
static int __init android_power_init(void)
{
int ret;
init_waitqueue_head(&fb_state_wq);
fb_state = FB_STATE_DRAWING_OK;
ret = sysfs_create_group(power_kobj, &attr_group);
if (ret) {
pr_err("android_power_init: sysfs_create_group failed\n");
return ret;
}
register_early_suspend(&stop_drawing_early_suspend_desc);
return 0;
}
static void __exit android_power_exit(void)
{
unregister_early_suspend(&stop_drawing_early_suspend_desc);
sysfs_remove_group(power_kobj, &attr_group);
}
module_init(android_power_init);
module_exit(android_power_exit);
| gpl-2.0 |
PyYoshi/android_kernel_sharp_303sh | arch/powerpc/platforms/pseries/hotplug-cpu.c | 4561 | 10919 | /*
* pseries CPU Hotplug infrastructure.
*
* Split out from arch/powerpc/platforms/pseries/setup.c
* arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
*
* Peter Bergner, IBM March 2001.
* Copyright (C) 2001 IBM.
* Dave Engebretsen, Peter Bergner, and
* Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
* Plus various changes from other IBM teams...
*
* Copyright (C) 2006 Michael Ellerman, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h> /* for idle_task_exit */
#include <linux/cpu.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/pSeries_reconfig.h>
#include <asm/xics.h>
#include "plpar_wrappers.h"
#include "offline_states.h"
/* This version can't take the spinlock, because it never returns */
static struct rtas_args rtas_stop_self_args = {
.token = RTAS_UNKNOWN_SERVICE,
.nargs = 0,
.nret = 1,
.rets = &rtas_stop_self_args.args[0],
};
static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
CPU_STATE_OFFLINE;
static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;
static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;
static int cede_offline_enabled __read_mostly = 1;
/*
* Enable/disable cede_offline when available.
*/
static int __init setup_cede_offline(char *str)
{
if (!strcmp(str, "off"))
cede_offline_enabled = 0;
else if (!strcmp(str, "on"))
cede_offline_enabled = 1;
else
return 0;
return 1;
}
__setup("cede_offline=", setup_cede_offline);
enum cpu_state_vals get_cpu_current_state(int cpu)
{
return per_cpu(current_state, cpu);
}
void set_cpu_current_state(int cpu, enum cpu_state_vals state)
{
per_cpu(current_state, cpu) = state;
}
enum cpu_state_vals get_preferred_offline_state(int cpu)
{
return per_cpu(preferred_offline_state, cpu);
}
void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
{
per_cpu(preferred_offline_state, cpu) = state;
}
void set_default_offline_state(int cpu)
{
per_cpu(preferred_offline_state, cpu) = default_offline_state;
}
static void rtas_stop_self(void)
{
struct rtas_args *args = &rtas_stop_self_args;
local_irq_disable();
BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
printk("cpu %u (hwid %u) Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
enter_rtas(__pa(args));
panic("Alas, I survived.\n");
}
static void pseries_mach_cpu_die(void)
{
unsigned int cpu = smp_processor_id();
unsigned int hwcpu = hard_smp_processor_id();
u8 cede_latency_hint = 0;
local_irq_disable();
idle_task_exit();
xics_teardown_cpu();
if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
if (ppc_md.suspend_disable_cpu)
ppc_md.suspend_disable_cpu();
cede_latency_hint = 2;
get_lppaca()->idle = 1;
if (!get_lppaca()->shared_proc)
get_lppaca()->donate_dedicated_cpu = 1;
while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
extended_cede_processor(cede_latency_hint);
}
if (!get_lppaca()->shared_proc)
get_lppaca()->donate_dedicated_cpu = 0;
get_lppaca()->idle = 0;
if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
unregister_slb_shadow(hwcpu);
/*
* Call to start_secondary_resume() will not return.
* Kernel stack will be reset and start_secondary()
* will be called to continue the online operation.
*/
start_secondary_resume();
}
}
/* Requested state is CPU_STATE_OFFLINE at this point */
WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);
set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
unregister_slb_shadow(hwcpu);
rtas_stop_self();
/* Should never get here... */
BUG();
for(;;);
}
static int pseries_cpu_disable(void)
{
int cpu = smp_processor_id();
set_cpu_online(cpu, false);
vdso_data->processorCount--;
/*fix boot_cpuid here*/
if (cpu == boot_cpuid)
boot_cpuid = cpumask_any(cpu_online_mask);
/* FIXME: abstract this to not be platform specific later on */
xics_migrate_irqs_away();
return 0;
}
/*
* pseries_cpu_die: Wait for the cpu to die.
* @cpu: logical processor id of the CPU whose death we're awaiting.
*
* This function is called from the context of the thread which is performing
* the cpu-offline. Here we wait for long enough to allow the cpu in question
* to self-destruct so that the cpu-offline thread can send the CPU_DEAD
* notifications.
*
* OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
* self-destruct.
*/
static void pseries_cpu_die(unsigned int cpu)
{
int tries;
int cpu_status = 1;
unsigned int pcpu = get_hard_smp_processor_id(cpu);
if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
cpu_status = 1;
for (tries = 0; tries < 5000; tries++) {
if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
cpu_status = 0;
break;
}
msleep(1);
}
} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
for (tries = 0; tries < 25; tries++) {
cpu_status = smp_query_cpu_stopped(pcpu);
if (cpu_status == QCSS_STOPPED ||
cpu_status == QCSS_HARDWARE_ERROR)
break;
cpu_relax();
}
}
if (cpu_status != 0) {
printk("Querying DEAD? cpu %i (%i) shows %i\n",
cpu, pcpu, cpu_status);
}
/* Isolation and deallocation are definitely done by
* drslot_chrp_cpu. If they were not they would be
* done here. Change isolate state to Isolate and
* change allocation-state to Unusable.
*/
paca[cpu].cpu_start = 0;
}
/*
* Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
* here is that a cpu device node may represent up to two logical cpus
* in the SMT case. We must honor the assumption in other code that
* the logical ids for sibling SMT threads x and y are adjacent, such
* that x^1 == y and y^1 == x.
*/
static int pseries_add_processor(struct device_node *np)
{
unsigned int cpu;
cpumask_var_t candidate_mask, tmp;
int err = -ENOSPC, len, nthreads, i;
const u32 *intserv;
intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
return 0;
zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
zalloc_cpumask_var(&tmp, GFP_KERNEL);
nthreads = len / sizeof(u32);
for (i = 0; i < nthreads; i++)
cpumask_set_cpu(i, tmp);
cpu_maps_update_begin();
BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
/* Get a bitmap of unoccupied slots. */
cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
if (cpumask_empty(candidate_mask)) {
/* If we get here, it most likely means that NR_CPUS is
* less than the partition's max processors setting.
*/
printk(KERN_ERR "Cannot add cpu %s; this system configuration"
" supports %d logical cpus.\n", np->full_name,
cpumask_weight(cpu_possible_mask));
goto out_unlock;
}
while (!cpumask_empty(tmp))
if (cpumask_subset(tmp, candidate_mask))
/* Found a range where we can insert the new cpu(s) */
break;
else
cpumask_shift_left(tmp, tmp, nthreads);
if (cpumask_empty(tmp)) {
printk(KERN_ERR "Unable to find space in cpu_present_mask for"
" processor %s with %d thread(s)\n", np->name,
nthreads);
goto out_unlock;
}
for_each_cpu(cpu, tmp) {
BUG_ON(cpu_present(cpu));
set_cpu_present(cpu, true);
set_hard_smp_processor_id(cpu, *intserv++);
}
err = 0;
out_unlock:
cpu_maps_update_done();
free_cpumask_var(candidate_mask);
free_cpumask_var(tmp);
return err;
}
/*
* Update the present map for a cpu node which is going away, and set
* the hard id in the paca(s) to -1 to be consistent with boot time
* convention for non-present cpus.
*/
static void pseries_remove_processor(struct device_node *np)
{
unsigned int cpu;
int len, nthreads, i;
const u32 *intserv;
intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
return;
nthreads = len / sizeof(u32);
cpu_maps_update_begin();
for (i = 0; i < nthreads; i++) {
for_each_present_cpu(cpu) {
if (get_hard_smp_processor_id(cpu) != intserv[i])
continue;
BUG_ON(cpu_online(cpu));
set_cpu_present(cpu, false);
set_hard_smp_processor_id(cpu, -1);
break;
}
if (cpu >= nr_cpu_ids)
printk(KERN_WARNING "Could not find cpu to remove "
"with physical id 0x%x\n", intserv[i]);
}
cpu_maps_update_done();
}
static int pseries_smp_notifier(struct notifier_block *nb,
unsigned long action, void *node)
{
int err = 0;
switch (action) {
case PSERIES_RECONFIG_ADD:
err = pseries_add_processor(node);
break;
case PSERIES_RECONFIG_REMOVE:
pseries_remove_processor(node);
break;
}
return notifier_from_errno(err);
}
static struct notifier_block pseries_smp_nb = {
.notifier_call = pseries_smp_notifier,
};
#define MAX_CEDE_LATENCY_LEVELS 4
#define CEDE_LATENCY_PARAM_LENGTH 10
#define CEDE_LATENCY_PARAM_MAX_LENGTH \
(MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
#define CEDE_LATENCY_TOKEN 45
static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH];
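/*
 * Fetch the cede latency settings from firmware via the
 * ibm,get-system-parameter RTAS call. A zero return means the
 * parameter exists, in which case offlined CPUs default to the
 * extended-cede (inactive) state instead of being stopped.
 */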
static int parse_cede_parameters(void)
{
memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH);
return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
NULL,
CEDE_LATENCY_TOKEN,
__pa(cede_parameters),
CEDE_LATENCY_PARAM_MAX_LENGTH);
}
static int __init pseries_cpu_hotplug_init(void)
{
struct device_node *np;
const char *typep;
int cpu;
int qcss_tok;
for_each_node_by_name(np, "interrupt-controller") {
typep = of_get_property(np, "compatible", NULL);
if (strstr(typep, "open-pic")) {
of_node_put(np);
printk(KERN_INFO "CPU Hotplug not supported on "
"systems using MPIC\n");
return 0;
}
}
rtas_stop_self_args.token = rtas_token("stop-self");
qcss_tok = rtas_token("query-cpu-stopped-state");
if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
qcss_tok == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "CPU Hotplug not supported by firmware "
"- disabling.\n");
return 0;
}
ppc_md.cpu_die = pseries_mach_cpu_die;
smp_ops->cpu_disable = pseries_cpu_disable;
smp_ops->cpu_die = pseries_cpu_die;
/* Processors can be added/removed only on LPAR */
if (firmware_has_feature(FW_FEATURE_LPAR)) {
pSeries_reconfig_notifier_register(&pseries_smp_nb);
cpu_maps_update_begin();
if (cede_offline_enabled && parse_cede_parameters() == 0) {
default_offline_state = CPU_STATE_INACTIVE;
for_each_online_cpu(cpu)
set_default_offline_state(cpu);
}
cpu_maps_update_done();
}
return 0;
}
arch_initcall(pseries_cpu_hotplug_init);
| gpl-2.0 |
oppo-source/Neo5-kernel-source | drivers/cpufreq/s3c64xx-cpufreq.c | 5073 | 6463 | /*
* Copyright 2009 Wolfson Microelectronics plc
*
* S3C64xx CPUfreq Support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) "cpufreq: " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
static struct clk *armclk;
static struct regulator *vddarm;
static unsigned long regulator_latency;
#ifdef CONFIG_CPU_S3C6410
struct s3c64xx_dvfs {
unsigned int vddarm_min;
unsigned int vddarm_max;
};
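/*
 * Each DVFS table entry gives the allowed VDDARM range in microvolts;
 * the index field of the frequency table below selects one of these
 * voltage levels for the corresponding CPU frequency.
 */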
static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
[0] = { 1000000, 1150000 },
[1] = { 1050000, 1150000 },
[2] = { 1100000, 1150000 },
[3] = { 1200000, 1350000 },
[4] = { 1300000, 1350000 },
};
static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
{ 0, 66000 },
{ 0, 100000 },
{ 0, 133000 },
{ 1, 200000 },
{ 1, 222000 },
{ 1, 266000 },
{ 2, 333000 },
{ 2, 400000 },
{ 2, 532000 },
{ 2, 533000 },
{ 3, 667000 },
{ 4, 800000 },
{ 0, CPUFREQ_TABLE_END },
};
#endif
static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
}
static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
{
if (cpu != 0)
return 0;
return clk_get_rate(armclk) / 1000;
}
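/*
 * DVFS ordering: when moving to a higher frequency the VDDARM supply
 * is raised before the clock is changed; when moving lower it is only
 * dropped after the clock change, so the core never runs faster than
 * its current supply voltage allows.
 */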
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
int ret;
unsigned int i;
struct cpufreq_freqs freqs;
struct s3c64xx_dvfs *dvfs;
ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table,
target_freq, relation, &i);
if (ret != 0)
return ret;
freqs.cpu = 0;
freqs.old = clk_get_rate(armclk) / 1000;
freqs.new = s3c64xx_freq_table[i].frequency;
freqs.flags = 0;
dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].index];
if (freqs.old == freqs.new)
return 0;
pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
#ifdef CONFIG_REGULATOR
if (vddarm && freqs.new > freqs.old) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
freqs.new, ret);
goto err;
}
}
#endif
ret = clk_set_rate(armclk, freqs.new * 1000);
if (ret < 0) {
pr_err("Failed to set rate %dkHz: %d\n",
freqs.new, ret);
goto err;
}
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
#ifdef CONFIG_REGULATOR
if (vddarm && freqs.new < freqs.old) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
freqs.new, ret);
goto err_clk;
}
}
#endif
pr_debug("Set actual frequency %lukHz\n",
clk_get_rate(armclk) / 1000);
return 0;
err_clk:
if (clk_set_rate(armclk, freqs.old * 1000) < 0)
pr_err("Failed to restore original clock rate\n");
err:
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return ret;
}
#ifdef CONFIG_REGULATOR
static void __init s3c64xx_cpufreq_config_regulator(void)
{
int count, v, i, found;
struct cpufreq_frequency_table *freq;
struct s3c64xx_dvfs *dvfs;
count = regulator_count_voltages(vddarm);
if (count < 0) {
pr_err("Unable to check supported voltages\n");
}
freq = s3c64xx_freq_table;
while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) {
if (freq->frequency == CPUFREQ_ENTRY_INVALID) {
freq++; /* advance past invalid entries to avoid an endless loop */
continue;
}
dvfs = &s3c64xx_dvfs_table[freq->index];
found = 0;
for (i = 0; i < count; i++) {
v = regulator_list_voltage(vddarm, i);
if (v >= dvfs->vddarm_min && v <= dvfs->vddarm_max)
found = 1;
}
if (!found) {
pr_debug("%dkHz unsupported by regulator\n",
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
}
freq++;
}
/* Guess based on having to do an I2C/SPI write; in future we
* will be able to query the regulator performance here. */
regulator_latency = 1 * 1000 * 1000;
}
#endif
static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
{
int ret;
struct cpufreq_frequency_table *freq;
if (policy->cpu != 0)
return -EINVAL;
if (s3c64xx_freq_table == NULL) {
pr_err("No frequency information for this CPU\n");
return -ENODEV;
}
armclk = clk_get(NULL, "armclk");
if (IS_ERR(armclk)) {
pr_err("Unable to obtain ARMCLK: %ld\n",
PTR_ERR(armclk));
return PTR_ERR(armclk);
}
#ifdef CONFIG_REGULATOR
vddarm = regulator_get(NULL, "vddarm");
if (IS_ERR(vddarm)) {
ret = PTR_ERR(vddarm);
pr_err("Failed to obtain VDDARM: %d\n", ret);
pr_err("Only frequency scaling available\n");
vddarm = NULL;
} else {
s3c64xx_cpufreq_config_regulator();
}
#endif
freq = s3c64xx_freq_table;
while (freq->frequency != CPUFREQ_TABLE_END) {
unsigned long r;
/* Check for frequencies we can generate */
r = clk_round_rate(armclk, freq->frequency * 1000);
r /= 1000;
if (r != freq->frequency) {
pr_debug("%dkHz unsupported by clock\n",
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
}
/* If we have no regulator then assume startup
* frequency is the maximum we can support. */
if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0))
freq->frequency = CPUFREQ_ENTRY_INVALID;
freq++;
}
policy->cur = clk_get_rate(armclk) / 1000;
/* Datasheet says PLL stabilisation time (if we were to use
* the PLLs, which we don't currently) is ~300us worst case,
* but add some fudge.
*/
policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;
ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
if (ret != 0) {
pr_err("Failed to configure frequency table: %d\n",
ret);
regulator_put(vddarm);
clk_put(armclk);
}
return ret;
}
static struct cpufreq_driver s3c64xx_cpufreq_driver = {
.owner = THIS_MODULE,
.flags = 0,
.verify = s3c64xx_cpufreq_verify_speed,
.target = s3c64xx_cpufreq_set_target,
.get = s3c64xx_cpufreq_get_speed,
.init = s3c64xx_cpufreq_driver_init,
.name = "s3c",
};
static int __init s3c64xx_cpufreq_init(void)
{
return cpufreq_register_driver(&s3c64xx_cpufreq_driver);
}
module_init(s3c64xx_cpufreq_init);
| gpl-2.0 |
jrior001/android_kernel_oneplus_msm8974 | kernel/stacktrace.c | 6865 | 1084 | /*
* kernel/stacktrace.c
*
* Stack trace management functions
*
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
void print_stack_trace(struct stack_trace *trace, int spaces)
{
int i;
if (WARN_ON(!trace->entries))
return;
for (i = 0; i < trace->nr_entries; i++) {
printk("%*c", 1 + spaces, ' ');
print_ip_sym(trace->entries[i]);
}
}
EXPORT_SYMBOL_GPL(print_stack_trace);
/*
* Architectures that do not implement save_stack_trace_tsk or
* save_stack_trace_regs get this weak alias and a once-per-bootup warning
* (whenever this facility is utilized - for example by procfs):
*/
__weak void
save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
}
__weak void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
}
| gpl-2.0 |
Andy1911/Andy_Onepone | arch/powerpc/lib/rheap.c | 8145 | 16859 | /*
* A Remote Heap. Remote means that we don't touch the memory that the
* heap points to. Normal heap implementations use the memory they manage
* to place their list. We cannot do that because the memory we manage may
* have special properties, for example it is uncachable or of different
* endianness.
*
* Author: Pantelis Antoniou <panto@intracom.gr>
*
* 2004 (c) INTRACOM S.A. Greece. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/rheap.h>
/*
* Fixup a list_head, needed when copying lists. If the pointers fall
* between s and e, apply the delta. This assumes that
* sizeof(struct list_head *) == sizeof(unsigned long *).
*/
static inline void fixup(unsigned long s, unsigned long e, int d,
struct list_head *l)
{
unsigned long *pp;
pp = (unsigned long *)&l->next;
if (*pp >= s && *pp < e)
*pp += d;
pp = (unsigned long *)&l->prev;
if (*pp >= s && *pp < e)
*pp += d;
}
/* Grow the allocated blocks */
static int grow(rh_info_t * info, int max_blocks)
{
rh_block_t *block, *blk;
int i, new_blocks;
int delta;
unsigned long blks, blke;
if (max_blocks <= info->max_blocks)
return -EINVAL;
new_blocks = max_blocks - info->max_blocks;
block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_ATOMIC);
if (block == NULL)
return -ENOMEM;
if (info->max_blocks > 0) {
/* copy old block area */
memcpy(block, info->block,
sizeof(rh_block_t) * info->max_blocks);
delta = (char *)block - (char *)info->block;
/* and fixup list pointers */
blks = (unsigned long)info->block;
blke = (unsigned long)(info->block + info->max_blocks);
for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
fixup(blks, blke, delta, &blk->list);
fixup(blks, blke, delta, &info->empty_list);
fixup(blks, blke, delta, &info->free_list);
fixup(blks, blke, delta, &info->taken_list);
/* free the old allocated memory */
if ((info->flags & RHIF_STATIC_BLOCK) == 0)
kfree(info->block);
}
info->block = block;
info->empty_slots += new_blocks;
info->max_blocks = max_blocks;
info->flags &= ~RHIF_STATIC_BLOCK;
/* add all new blocks to the free list */
blk = block + info->max_blocks - new_blocks;
for (i = 0; i < new_blocks; i++, blk++)
list_add(&blk->list, &info->empty_list);
return 0;
}
/*
* Assure at least the required amount of empty slots. If this function
* causes a grow in the block area then all pointers kept to the block
* area are invalid!
*/
static int assure_empty(rh_info_t * info, int slots)
{
int max_blocks;
/* This function is not meant to be used to grow uncontrollably */
if (slots >= 4)
return -EINVAL;
/* Enough space */
if (info->empty_slots >= slots)
return 0;
/* Next 16 sized block */
max_blocks = ((info->max_blocks + slots) + 15) & ~15;
return grow(info, max_blocks);
}
static rh_block_t *get_slot(rh_info_t * info)
{
rh_block_t *blk;
/* If no more free slots, and failure to extend. */
/* XXX: You should have called assure_empty before */
if (info->empty_slots == 0) {
printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
return NULL;
}
/* Get empty slot to use */
blk = list_entry(info->empty_list.next, rh_block_t, list);
list_del_init(&blk->list);
info->empty_slots--;
/* Initialize */
blk->start = 0;
blk->size = 0;
blk->owner = NULL;
return blk;
}
static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
list_add(&blk->list, &info->empty_list);
info->empty_slots++;
}
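/*
 * Insert a block into the address-sorted free list, coalescing it
 * with the free blocks immediately before and/or after it when they
 * are adjacent.
 */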
static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
rh_block_t *blk;
rh_block_t *before;
rh_block_t *after;
rh_block_t *next;
int size;
unsigned long s, e, bs, be;
struct list_head *l;
/* We assume that they are aligned properly */
size = blkn->size;
s = blkn->start;
e = s + size;
/* Find the blocks immediately before and after the given one
* (if any) */
before = NULL;
after = NULL;
next = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
bs = blk->start;
be = bs + blk->size;
if (next == NULL && s >= bs)
next = blk;
if (be == s)
before = blk;
if (e == bs)
after = blk;
/* If both are not null, break now */
if (before != NULL && after != NULL)
break;
}
/* Now check if they are really adjacent */
if (before && s != (before->start + before->size))
before = NULL;
if (after && e != after->start)
after = NULL;
/* No coalescing; list insert and return */
if (before == NULL && after == NULL) {
if (next != NULL)
list_add(&blkn->list, &next->list);
else
list_add(&blkn->list, &info->free_list);
return;
}
/* We don't need it anymore */
release_slot(info, blkn);
/* Grow the before block */
if (before != NULL && after == NULL) {
before->size += size;
return;
}
/* Grow the after block backwards */
if (before == NULL && after != NULL) {
after->start -= size;
after->size += size;
return;
}
/* Grow the before block, and release the after block */
before->size += size + after->size;
list_del(&after->list);
release_slot(info, after);
}
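/* Insert a block into the taken list, kept sorted by start address. */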
static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
rh_block_t *blk;
struct list_head *l;
/* Find the block immediately before the given one (if any) */
list_for_each(l, &info->taken_list) {
blk = list_entry(l, rh_block_t, list);
if (blk->start > blkn->start) {
list_add_tail(&blkn->list, &blk->list);
return;
}
}
list_add_tail(&blkn->list, &info->taken_list);
}
/*
* Create a remote heap dynamically. Note that no memory for the blocks
* is allocated; that happens upon the first allocation.
*/
rh_info_t *rh_create(unsigned int alignment)
{
rh_info_t *info;
/* Alignment must be a power of two */
if ((alignment & (alignment - 1)) != 0)
return ERR_PTR(-EINVAL);
info = kmalloc(sizeof(*info), GFP_ATOMIC);
if (info == NULL)
return ERR_PTR(-ENOMEM);
info->alignment = alignment;
/* Initially everything as empty */
info->block = NULL;
info->max_blocks = 0;
info->empty_slots = 0;
info->flags = 0;
INIT_LIST_HEAD(&info->empty_list);
INIT_LIST_HEAD(&info->free_list);
INIT_LIST_HEAD(&info->taken_list);
return info;
}
EXPORT_SYMBOL_GPL(rh_create);
/*
* Destroy a dynamically created remote heap. Deallocate only if the areas
* are not static
*/
void rh_destroy(rh_info_t * info)
{
if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
kfree(info->block);
if ((info->flags & RHIF_STATIC_INFO) == 0)
kfree(info);
}
EXPORT_SYMBOL_GPL(rh_destroy);
/*
* Initialize in place a remote heap info block. This is needed to support
* operation very early in the startup of the kernel, when it is not yet safe
* to call kmalloc.
*/
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
rh_block_t * block)
{
int i;
rh_block_t *blk;
/* Alignment must be a power of two */
if ((alignment & (alignment - 1)) != 0)
return;
info->alignment = alignment;
/* Initially everything as empty */
info->block = block;
info->max_blocks = max_blocks;
info->empty_slots = max_blocks;
info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;
INIT_LIST_HEAD(&info->empty_list);
INIT_LIST_HEAD(&info->free_list);
INIT_LIST_HEAD(&info->taken_list);
/* Add all new blocks to the free list */
for (i = 0, blk = block; i < max_blocks; i++, blk++)
list_add(&blk->list, &info->empty_list);
}
EXPORT_SYMBOL_GPL(rh_init);
/* Attach a free memory region, coalescing it with adjacent free regions */
int rh_attach_region(rh_info_t * info, unsigned long start, int size)
{
rh_block_t *blk;
unsigned long s, e, m;
int r;
/* The region must be aligned */
s = start;
e = s + size;
m = info->alignment - 1;
/* Round start up */
s = (s + m) & ~m;
/* Round end down */
e = e & ~m;
if (IS_ERR_VALUE(e) || (e < s))
return -ERANGE;
/* Take final values */
start = s;
size = e - s;
/* Grow the blocks, if needed */
r = assure_empty(info, 1);
if (r < 0)
return r;
blk = get_slot(info);
blk->start = start;
blk->size = size;
blk->owner = NULL;
attach_free_block(info, blk);
return 0;
}
EXPORT_SYMBOL_GPL(rh_attach_region);
/* Detach the given address range, splitting a free block if needed. */
unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
{
struct list_head *l;
rh_block_t *blk, *newblk;
unsigned long s, e, m, bs, be;
/* Validate size */
if (size <= 0)
return (unsigned long) -EINVAL;
/* The region must be aligned */
s = start;
e = s + size;
m = info->alignment - 1;
/* Round start up */
s = (s + m) & ~m;
/* Round end down */
e = e & ~m;
if (assure_empty(info, 1) < 0)
return (unsigned long) -ENOMEM;
blk = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
/* The range must lie entirely inside one free block */
bs = blk->start;
be = blk->start + blk->size;
if (s >= bs && e <= be)
break;
blk = NULL;
}
if (blk == NULL)
return (unsigned long) -ENOMEM;
/* Perfect fit */
if (bs == s && be == e) {
/* Delete from free list, release slot */
list_del(&blk->list);
release_slot(info, blk);
return s;
}
/* blk still in free list, with updated start and/or size */
if (bs == s || be == e) {
if (bs == s)
blk->start += size;
blk->size -= size;
} else {
/* The front free fragment */
blk->size = s - bs;
/* the back free fragment */
newblk = get_slot(info);
newblk->start = e;
newblk->size = be - e;
list_add(&newblk->list, &blk->list);
}
return s;
}
EXPORT_SYMBOL_GPL(rh_detach_region);
/* Allocate a block of memory at the specified alignment. The value returned
* is an offset into the buffer initialized by rh_init(), or a negative number
* if there is an error.
*/
unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
{
struct list_head *l;
rh_block_t *blk;
rh_block_t *newblk;
unsigned long start, sp_size;
/* Validate size, and alignment must be power of two */
if (size <= 0 || (alignment & (alignment - 1)) != 0)
return (unsigned long) -EINVAL;
/* Align to configured alignment */
size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
if (assure_empty(info, 2) < 0)
return (unsigned long) -ENOMEM;
blk = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
if (size <= blk->size) {
start = (blk->start + alignment - 1) & ~(alignment - 1);
if (start + size <= blk->start + blk->size)
break;
}
blk = NULL;
}
if (blk == NULL)
return (unsigned long) -ENOMEM;
/* Just fits */
if (blk->size == size) {
/* Move from free list to taken list */
list_del(&blk->list);
newblk = blk;
} else {
/* Fragment caused, split if needed */
/* Create block for fragment in the beginning */
sp_size = start - blk->start;
if (sp_size) {
rh_block_t *spblk;
spblk = get_slot(info);
spblk->start = blk->start;
spblk->size = sp_size;
/* add before the blk */
list_add(&spblk->list, blk->list.prev);
}
newblk = get_slot(info);
newblk->start = start;
newblk->size = size;
/* blk still in free list, with updated start and size
* for fragment in the end */
blk->start = start + size;
blk->size -= sp_size + size;
/* No fragment in the end, remove blk */
if (blk->size == 0) {
list_del(&blk->list);
release_slot(info, blk);
}
}
newblk->owner = owner;
attach_taken_block(info, newblk);
return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_align);
/* Allocate a block of memory at the default alignment. The value returned is
* an offset into the buffer initialized by rh_init(), or a negative number if
* there is an error.
*/
unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
{
return rh_alloc_align(info, size, info->alignment, owner);
}
EXPORT_SYMBOL_GPL(rh_alloc);
/* Allocate a block of memory at the given offset, rounded up to the default
* alignment. The value returned is an offset into the buffer initialized by
* rh_init(), or a negative number if there is an error.
*/
unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
{
struct list_head *l;
rh_block_t *blk, *newblk1, *newblk2;
unsigned long s, e, m, bs = 0, be = 0;
/* Validate size */
if (size <= 0)
return (unsigned long) -EINVAL;
/* The region must be aligned */
s = start;
e = s + size;
m = info->alignment - 1;
/* Round start up */
s = (s + m) & ~m;
/* Round end down */
e = e & ~m;
if (assure_empty(info, 2) < 0)
return (unsigned long) -ENOMEM;
blk = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
/* The range must lie entirely inside one free block */
bs = blk->start;
be = blk->start + blk->size;
if (s >= bs && e <= be)
break;
blk = NULL;
}
if (blk == NULL)
return (unsigned long) -ENOMEM;
/* Perfect fit */
if (bs == s && be == e) {
/* Move from free list to taken list */
list_del(&blk->list);
blk->owner = owner;
start = blk->start;
attach_taken_block(info, blk);
return start;
}
/* blk still in free list, with updated start and/or size */
if (bs == s || be == e) {
if (bs == s)
blk->start += size;
blk->size -= size;
} else {
/* The front free fragment */
blk->size = s - bs;
/* The back free fragment */
newblk2 = get_slot(info);
newblk2->start = e;
newblk2->size = be - e;
list_add(&newblk2->list, &blk->list);
}
newblk1 = get_slot(info);
newblk1->start = s;
newblk1->size = e - s;
newblk1->owner = owner;
start = newblk1->start;
attach_taken_block(info, newblk1);
return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_fixed);
/* Deallocate the memory previously allocated by one of the rh_alloc functions.
* The return value is the size of the deallocated block, or a negative number
* if there is an error.
*/
int rh_free(rh_info_t * info, unsigned long start)
{
rh_block_t *blk, *blk2;
struct list_head *l;
int size;
/* Linear search for block */
blk = NULL;
list_for_each(l, &info->taken_list) {
blk2 = list_entry(l, rh_block_t, list);
if (start < blk2->start)
break;
blk = blk2;
}
if (blk == NULL || start > (blk->start + blk->size))
return -EINVAL;
/* Remove from taken list */
list_del(&blk->list);
/* Get size of freed block */
size = blk->size;
attach_free_block(info, blk);
return size;
}
EXPORT_SYMBOL_GPL(rh_free);
int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
rh_block_t *blk;
struct list_head *l;
struct list_head *h;
int nr;
switch (what) {
case RHGS_FREE:
h = &info->free_list;
break;
case RHGS_TAKEN:
h = &info->taken_list;
break;
default:
return -EINVAL;
}
/* Linear search for block */
nr = 0;
list_for_each(l, h) {
blk = list_entry(l, rh_block_t, list);
if (stats != NULL && nr < max_stats) {
stats->start = blk->start;
stats->size = blk->size;
stats->owner = blk->owner;
stats++;
}
nr++;
}
return nr;
}
EXPORT_SYMBOL_GPL(rh_get_stats);
int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner)
{
rh_block_t *blk, *blk2;
struct list_head *l;
int size;
/* Linear search for block */
blk = NULL;
list_for_each(l, &info->taken_list) {
blk2 = list_entry(l, rh_block_t, list);
if (start < blk2->start)
break;
blk = blk2;
}
if (blk == NULL || start > (blk->start + blk->size))
return -EINVAL;
blk->owner = owner;
size = blk->size;
return size;
}
EXPORT_SYMBOL_GPL(rh_set_owner);
void rh_dump(rh_info_t * info)
{
static rh_stats_t st[32]; /* XXX maximum 32 blocks */
int maxnr;
int i, nr;
maxnr = ARRAY_SIZE(st);
printk(KERN_INFO
"info @0x%p (%d slots empty / %d max)\n",
info, info->empty_slots, info->max_blocks);
printk(KERN_INFO " Free:\n");
nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
if (nr > maxnr)
nr = maxnr;
for (i = 0; i < nr; i++)
printk(KERN_INFO
" 0x%lx-0x%lx (%u)\n",
st[i].start, st[i].start + st[i].size,
st[i].size);
printk(KERN_INFO "\n");
printk(KERN_INFO " Taken:\n");
nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
if (nr > maxnr)
nr = maxnr;
for (i = 0; i < nr; i++)
printk(KERN_INFO
" 0x%lx-0x%lx (%u) %s\n",
st[i].start, st[i].start + st[i].size,
st[i].size, st[i].owner != NULL ? st[i].owner : "");
printk(KERN_INFO "\n");
}
EXPORT_SYMBOL_GPL(rh_dump);
void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
printk(KERN_INFO
"blk @0x%p: 0x%lx-0x%lx (%u)\n",
blk, blk->start, blk->start + blk->size, blk->size);
}
EXPORT_SYMBOL_GPL(rh_dump_blk);
| gpl-2.0 |
rkollataj/linux-can-next | arch/m68k/kernel/setup_mm.c | 210 | 13610 | /*
* linux/arch/m68k/kernel/setup.c
*
* Copyright (C) 1995 Hamish Macdonald
*/
/*
* This file handles the architecture-dependent parts of system setup
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/console.h>
#include <linux/genhd.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/initrd.h>
#include <asm/bootinfo.h>
#include <asm/byteorder.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/fpu.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/machdep.h>
#ifdef CONFIG_AMIGA
#include <asm/amigahw.h>
#endif
#ifdef CONFIG_ATARI
#include <asm/atarihw.h>
#include <asm/atari_stram.h>
#endif
#ifdef CONFIG_SUN3X
#include <asm/dvma.h>
#endif
#include <asm/natfeat.h>
#if !FPSTATESIZE || !NR_IRQS
#warning No CPU/platform type selected, your kernel will not work!
#warning Are you building an allnoconfig kernel?
#endif
unsigned long m68k_machtype;
EXPORT_SYMBOL(m68k_machtype);
unsigned long m68k_cputype;
EXPORT_SYMBOL(m68k_cputype);
unsigned long m68k_fputype;
unsigned long m68k_mmutype;
EXPORT_SYMBOL(m68k_mmutype);
#ifdef CONFIG_VME
unsigned long vme_brdtype;
EXPORT_SYMBOL(vme_brdtype);
#endif
int m68k_is040or060;
EXPORT_SYMBOL(m68k_is040or060);
extern unsigned long availmem;
int m68k_num_memory;
EXPORT_SYMBOL(m68k_num_memory);
int m68k_realnum_memory;
EXPORT_SYMBOL(m68k_realnum_memory);
unsigned long m68k_memoffset;
struct m68k_mem_info m68k_memory[NUM_MEMINFO];
EXPORT_SYMBOL(m68k_memory);
static struct m68k_mem_info m68k_ramdisk __initdata;
static char m68k_command_line[CL_SIZE] __initdata;
void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL;
/* machine dependent irq functions */
void (*mach_init_IRQ) (void) __initdata = NULL;
void (*mach_get_model) (char *model);
void (*mach_get_hardware_list) (struct seq_file *m);
/* machine dependent timer functions */
int (*mach_hwclk) (int, struct rtc_time*);
EXPORT_SYMBOL(mach_hwclk);
int (*mach_set_clock_mmss) (unsigned long);
unsigned int (*mach_get_ss)(void);
int (*mach_get_rtc_pll)(struct rtc_pll_info *);
int (*mach_set_rtc_pll)(struct rtc_pll_info *);
EXPORT_SYMBOL(mach_get_ss);
EXPORT_SYMBOL(mach_get_rtc_pll);
EXPORT_SYMBOL(mach_set_rtc_pll);
void (*mach_reset)( void );
void (*mach_halt)( void );
void (*mach_power_off)( void );
long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
#ifdef CONFIG_HEARTBEAT
void (*mach_heartbeat) (int);
EXPORT_SYMBOL(mach_heartbeat);
#endif
#ifdef CONFIG_M68K_L2_CACHE
void (*mach_l2_flush) (int);
#endif
#if IS_ENABLED(CONFIG_INPUT_M68K_BEEP)
void (*mach_beep)(unsigned int, unsigned int);
EXPORT_SYMBOL(mach_beep);
#endif
#if defined(CONFIG_ISA) && defined(MULTI_ISA)
int isa_type;
int isa_sex;
EXPORT_SYMBOL(isa_type);
EXPORT_SYMBOL(isa_sex);
#endif
extern int amiga_parse_bootinfo(const struct bi_record *);
extern int atari_parse_bootinfo(const struct bi_record *);
extern int mac_parse_bootinfo(const struct bi_record *);
extern int q40_parse_bootinfo(const struct bi_record *);
extern int bvme6000_parse_bootinfo(const struct bi_record *);
extern int mvme16x_parse_bootinfo(const struct bi_record *);
extern int mvme147_parse_bootinfo(const struct bi_record *);
extern int hp300_parse_bootinfo(const struct bi_record *);
extern int apollo_parse_bootinfo(const struct bi_record *);
extern void config_amiga(void);
extern void config_atari(void);
extern void config_mac(void);
extern void config_sun3(void);
extern void config_apollo(void);
extern void config_mvme147(void);
extern void config_mvme16x(void);
extern void config_bvme6000(void);
extern void config_hp300(void);
extern void config_q40(void);
extern void config_sun3x(void);
#define MASK_256K 0xfffc0000
extern void paging_init(void);
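/*
 * Walk the bootinfo records that the bootloader placed right after the
 * kernel image: memory chunks, the ramdisk location and the command
 * line are handled here, anything else is handed to the platform
 * specific parser for the detected machine type.
 */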
static void __init m68k_parse_bootinfo(const struct bi_record *record)
{
uint16_t tag;
save_bootinfo(record);
while ((tag = be16_to_cpu(record->tag)) != BI_LAST) {
int unknown = 0;
const void *data = record->data;
uint16_t size = be16_to_cpu(record->size);
switch (tag) {
case BI_MACHTYPE:
case BI_CPUTYPE:
case BI_FPUTYPE:
case BI_MMUTYPE:
/* Already set up by head.S */
break;
case BI_MEMCHUNK:
if (m68k_num_memory < NUM_MEMINFO) {
const struct mem_info *m = data;
m68k_memory[m68k_num_memory].addr =
be32_to_cpu(m->addr);
m68k_memory[m68k_num_memory].size =
be32_to_cpu(m->size);
m68k_num_memory++;
} else
pr_warn("%s: too many memory chunks\n",
__func__);
break;
case BI_RAMDISK:
{
const struct mem_info *m = data;
m68k_ramdisk.addr = be32_to_cpu(m->addr);
m68k_ramdisk.size = be32_to_cpu(m->size);
}
break;
case BI_COMMAND_LINE:
strlcpy(m68k_command_line, data,
sizeof(m68k_command_line));
break;
default:
if (MACH_IS_AMIGA)
unknown = amiga_parse_bootinfo(record);
else if (MACH_IS_ATARI)
unknown = atari_parse_bootinfo(record);
else if (MACH_IS_MAC)
unknown = mac_parse_bootinfo(record);
else if (MACH_IS_Q40)
unknown = q40_parse_bootinfo(record);
else if (MACH_IS_BVME6000)
unknown = bvme6000_parse_bootinfo(record);
else if (MACH_IS_MVME16x)
unknown = mvme16x_parse_bootinfo(record);
else if (MACH_IS_MVME147)
unknown = mvme147_parse_bootinfo(record);
else if (MACH_IS_HP300)
unknown = hp300_parse_bootinfo(record);
else if (MACH_IS_APOLLO)
unknown = apollo_parse_bootinfo(record);
else
unknown = 1;
}
if (unknown)
pr_warn("%s: unknown tag 0x%04x ignored\n", __func__,
tag);
record = (struct bi_record *)((unsigned long)record + size);
}
m68k_realnum_memory = m68k_num_memory;
#ifdef CONFIG_SINGLE_MEMORY_CHUNK
if (m68k_num_memory > 1) {
pr_warn("%s: ignoring last %i chunks of physical memory\n",
__func__, (m68k_num_memory - 1));
m68k_num_memory = 1;
}
#endif
}
void __init setup_arch(char **cmdline_p)
{
#ifndef CONFIG_SUN3
int i;
#endif
/* The bootinfo is located right after the kernel */
if (!CPU_IS_COLDFIRE)
m68k_parse_bootinfo((const struct bi_record *)_end);
if (CPU_IS_040)
m68k_is040or060 = 4;
else if (CPU_IS_060)
m68k_is040or060 = 6;
/* FIXME: m68k_fputype is passed in by Penguin booter, which can
* be confused by software FPU emulation. BEWARE.
* We should really do our own FPU check at startup.
* [what do we do with buggy 68LC040s? if we have problems
* with them, we should add a test to check_bugs() below] */
#if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU_ONLY)
/* clear the fpu if we have one */
if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|FPU_COLDFIRE)) {
volatile int zero = 0;
asm volatile ("frestore %0" : : "m" (zero));
}
#endif
if (CPU_IS_060) {
u32 pcr;
asm (".chip 68060; movec %%pcr,%0; .chip 68k"
: "=d" (pcr));
if (((pcr >> 8) & 0xff) <= 5) {
pr_warn("Enabling workaround for errata I14\n");
asm (".chip 68060; movec %0,%%pcr; .chip 68k"
: : "d" (pcr | 0x20));
}
}
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long)_etext;
init_mm.end_data = (unsigned long)_edata;
init_mm.brk = (unsigned long)_end;
#if defined(CONFIG_BOOTPARAM)
strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE);
m68k_command_line[CL_SIZE - 1] = 0;
#endif /* CONFIG_BOOTPARAM */
process_uboot_commandline(&m68k_command_line[0], CL_SIZE);
*cmdline_p = m68k_command_line;
memcpy(boot_command_line, *cmdline_p, CL_SIZE);
parse_early_param();
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
switch (m68k_machtype) {
#ifdef CONFIG_AMIGA
case MACH_AMIGA:
config_amiga();
break;
#endif
#ifdef CONFIG_ATARI
case MACH_ATARI:
config_atari();
break;
#endif
#ifdef CONFIG_MAC
case MACH_MAC:
config_mac();
break;
#endif
#ifdef CONFIG_SUN3
case MACH_SUN3:
config_sun3();
break;
#endif
#ifdef CONFIG_APOLLO
case MACH_APOLLO:
config_apollo();
break;
#endif
#ifdef CONFIG_MVME147
case MACH_MVME147:
config_mvme147();
break;
#endif
#ifdef CONFIG_MVME16x
case MACH_MVME16x:
config_mvme16x();
break;
#endif
#ifdef CONFIG_BVME6000
case MACH_BVME6000:
config_bvme6000();
break;
#endif
#ifdef CONFIG_HP300
case MACH_HP300:
config_hp300();
break;
#endif
#ifdef CONFIG_Q40
case MACH_Q40:
config_q40();
break;
#endif
#ifdef CONFIG_SUN3X
case MACH_SUN3X:
config_sun3x();
break;
#endif
#ifdef CONFIG_COLDFIRE
case MACH_M54XX:
case MACH_M5441X:
config_BSP(NULL, 0);
break;
#endif
default:
panic("No configuration setup");
}
paging_init();
#ifdef CONFIG_NATFEAT
nf_init();
#endif
#ifndef CONFIG_SUN3
for (i = 1; i < m68k_num_memory; i++)
free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
m68k_memory[i].size);
#ifdef CONFIG_BLK_DEV_INITRD
if (m68k_ramdisk.size) {
reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
m68k_ramdisk.addr, m68k_ramdisk.size,
BOOTMEM_DEFAULT);
initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
initrd_end = initrd_start + m68k_ramdisk.size;
pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
}
#endif
#ifdef CONFIG_ATARI
if (MACH_IS_ATARI)
atari_stram_reserve_pages((void *)availmem);
#endif
#ifdef CONFIG_SUN3X
if (MACH_IS_SUN3X) {
dvma_init();
}
#endif
#endif /* !CONFIG_SUN3 */
/* set ISA defs early as possible */
#if defined(CONFIG_ISA) && defined(MULTI_ISA)
if (MACH_IS_Q40) {
isa_type = ISA_TYPE_Q40;
isa_sex = 0;
}
#ifdef CONFIG_AMIGA_PCMCIA
if (MACH_IS_AMIGA && AMIGAHW_PRESENT(PCMCIA)) {
isa_type = ISA_TYPE_AG;
isa_sex = 1;
}
#endif
#ifdef CONFIG_ATARI_ROM_ISA
if (MACH_IS_ATARI) {
isa_type = ISA_TYPE_ENEC;
isa_sex = 0;
}
#endif
#endif
}
static int show_cpuinfo(struct seq_file *m, void *v)
{
const char *cpu, *mmu, *fpu;
unsigned long clockfreq, clockfactor;
#define LOOP_CYCLES_68020 (8)
#define LOOP_CYCLES_68030 (8)
#define LOOP_CYCLES_68040 (3)
#define LOOP_CYCLES_68060 (1)
#define LOOP_CYCLES_COLDFIRE (2)
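/*
 * The clock frequency reported below is only an estimate derived from
 * the BogoMIPS calibration: loops_per_jiffy * HZ delay-loop iterations
 * per second, multiplied by the cycles one iteration takes on the
 * detected core.
 */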
if (CPU_IS_020) {
cpu = "68020";
clockfactor = LOOP_CYCLES_68020;
} else if (CPU_IS_030) {
cpu = "68030";
clockfactor = LOOP_CYCLES_68030;
} else if (CPU_IS_040) {
cpu = "68040";
clockfactor = LOOP_CYCLES_68040;
} else if (CPU_IS_060) {
cpu = "68060";
clockfactor = LOOP_CYCLES_68060;
} else if (CPU_IS_COLDFIRE) {
cpu = "ColdFire";
clockfactor = LOOP_CYCLES_COLDFIRE;
} else {
cpu = "680x0";
clockfactor = 0;
}
#ifdef CONFIG_M68KFPU_EMU_ONLY
fpu = "none(soft float)";
#else
if (m68k_fputype & FPU_68881)
fpu = "68881";
else if (m68k_fputype & FPU_68882)
fpu = "68882";
else if (m68k_fputype & FPU_68040)
fpu = "68040";
else if (m68k_fputype & FPU_68060)
fpu = "68060";
else if (m68k_fputype & FPU_SUNFPA)
fpu = "Sun FPA";
else if (m68k_fputype & FPU_COLDFIRE)
fpu = "ColdFire";
else
fpu = "none";
#endif
if (m68k_mmutype & MMU_68851)
mmu = "68851";
else if (m68k_mmutype & MMU_68030)
mmu = "68030";
else if (m68k_mmutype & MMU_68040)
mmu = "68040";
else if (m68k_mmutype & MMU_68060)
mmu = "68060";
else if (m68k_mmutype & MMU_SUN3)
mmu = "Sun-3";
else if (m68k_mmutype & MMU_APOLLO)
mmu = "Apollo";
else if (m68k_mmutype & MMU_COLDFIRE)
mmu = "ColdFire";
else
mmu = "unknown";
clockfreq = loops_per_jiffy * HZ * clockfactor;
seq_printf(m, "CPU:\t\t%s\n"
"MMU:\t\t%s\n"
"FPU:\t\t%s\n"
"Clocking:\t%lu.%1luMHz\n"
"BogoMips:\t%lu.%02lu\n"
"Calibration:\t%lu loops\n",
cpu, mmu, fpu,
clockfreq/1000000,(clockfreq/100000)%10,
loops_per_jiffy/(500000/HZ),(loops_per_jiffy/(5000/HZ))%100,
loops_per_jiffy);
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < 1 ? (void *)1 : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
#ifdef CONFIG_PROC_HARDWARE
static int hardware_proc_show(struct seq_file *m, void *v)
{
char model[80];
unsigned long mem;
int i;
if (mach_get_model)
mach_get_model(model);
else
strcpy(model, "Unknown m68k");
seq_printf(m, "Model:\t\t%s\n", model);
for (mem = 0, i = 0; i < m68k_num_memory; i++)
mem += m68k_memory[i].size;
seq_printf(m, "System Memory:\t%ldK\n", mem >> 10);
if (mach_get_hardware_list)
mach_get_hardware_list(m);
return 0;
}
static int hardware_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, hardware_proc_show, NULL);
}
static const struct file_operations hardware_proc_fops = {
.open = hardware_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init proc_hardware_init(void)
{
proc_create("hardware", 0, NULL, &hardware_proc_fops);
return 0;
}
module_init(proc_hardware_init);
#endif
void check_bugs(void)
{
#if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU)
if (m68k_fputype == 0) {
pr_emerg("*** YOU DO NOT HAVE A FLOATING POINT UNIT, "
"WHICH IS REQUIRED BY LINUX/M68K ***\n");
pr_emerg("Upgrade your hardware or join the FPU "
"emulation project\n");
panic("no FPU");
}
#endif /* !CONFIG_M68KFPU_EMU */
}
#ifdef CONFIG_ADB
static int __init adb_probe_sync_enable (char *str) {
extern int __adb_probe_sync;
__adb_probe_sync = 1;
return 1;
}
__setup("adb_sync", adb_probe_sync_enable);
#endif /* CONFIG_ADB */
| gpl-2.0 |
miamo/miamOv | drivers/media/video/exynos/tv/hdmi_drv.c | 466 | 22260 | /*
* Samsung HDMI interface driver
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
*
* Tomasz Stanislawski, <t.stanislaws@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License,
* or (at your option) any later version
*/
#include "hdmi.h"
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <media/v4l2-subdev.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/bug.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/videodev2_exynos_media.h>
#include <linux/sched.h>
#include <plat/tvout.h>
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/exynos_mc.h>
MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
MODULE_DESCRIPTION("Samsung HDMI");
MODULE_LICENSE("GPL");
/* default preset configured on probe */
#define HDMI_DEFAULT_PRESET V4L2_DV_1080P60
/* I2C module and id for HDMIPHY */
static struct i2c_board_info hdmiphy_info = {
I2C_BOARD_INFO("hdmiphy", 0x38),
};
static struct hdmi_driver_data hdmi_driver_data[] = {
{ .hdmiphy_bus = 3 },
{ .hdmiphy_bus = 8 },
{ .hdmiphy_bus = 8 },
};
static struct platform_device_id hdmi_driver_types[] = {
{
.name = "s5pv210-hdmi",
.driver_data = (unsigned long)&hdmi_driver_data[0],
}, {
.name = "exynos4-hdmi",
.driver_data = (unsigned long)&hdmi_driver_data[1],
}, {
.name = "exynos5-hdmi",
.driver_data = (unsigned long)&hdmi_driver_data[2],
}, {
/* end node */
}
};
static const struct v4l2_subdev_ops hdmi_sd_ops;
static struct hdmi_device *sd_to_hdmi_dev(struct v4l2_subdev *sd)
{
return container_of(sd, struct hdmi_device, sd);
}
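/*
 * The hot-plug detect line can be routed either to an external GPIO
 * interrupt (used while the HDMI block is powered down) or to the
 * HDMI IP's internal interrupt; the two helpers below switch the
 * source and re-register the IRQ handler accordingly.
 */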
static int set_external_hpd_int(struct hdmi_device *hdev)
{
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&hdev->hpd_lock, flags);
s5p_v4l2_int_src_ext_hpd();
/* irq change by TV power status */
if (hdev->curr_irq != hdev->ext_irq) {
disable_irq(hdev->curr_irq);
free_irq(hdev->curr_irq, hdev);
} else {
spin_unlock_irqrestore(&hdev->hpd_lock, flags);
return ret;
}
hdev->curr_irq = hdev->ext_irq;
ret = request_irq(hdev->curr_irq, hdmi_irq_handler,
IRQ_TYPE_EDGE_BOTH, "hdmi", hdev);
if (ret)
dev_err(hdev->dev, "request change failed.\n");
dev_info(hdev->dev, "HDMI interrupt source is changed : external\n");
spin_unlock_irqrestore(&hdev->hpd_lock, flags);
return ret;
}
static int set_internal_hpd_int(struct hdmi_device *hdev)
{
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&hdev->hpd_lock, flags);
s5p_v4l2_int_src_hdmi_hpd();
/* irq change by TV power status */
if (hdev->curr_irq != hdev->int_irq) {
disable_irq(hdev->curr_irq);
free_irq(hdev->curr_irq, hdev);
} else {
spin_unlock_irqrestore(&hdev->hpd_lock, flags);
return ret;
}
hdev->curr_irq = hdev->int_irq;
ret = request_irq(hdev->curr_irq, hdmi_irq_handler,
0, "hdmi", hdev);
if (ret)
dev_err(hdev->dev, "request change failed.\n");
dev_info(hdev->dev, "HDMI interrupt source is changed : internal\n");
spin_unlock_irqrestore(&hdev->hpd_lock, flags);
return ret;
}
static const struct hdmi_preset_conf *hdmi_preset2conf(u32 preset)
{
int i;
for (i = 0; i < hdmi_pre_cnt; ++i)
if (hdmi_conf[i].preset == preset)
return hdmi_conf[i].conf;
return NULL;
}
const struct hdmi_3d_info *hdmi_preset2info(u32 preset)
{
int i;
for (i = 0; i < hdmi_pre_cnt; ++i)
if (hdmi_conf[i].preset == preset)
return hdmi_conf[i].info;
return NULL;
}
static int hdmi_set_infoframe(struct hdmi_device *hdev)
{
struct hdmi_infoframe infoframe;
const struct hdmi_3d_info *info;
info = hdmi_preset2info(hdev->cur_preset);
if (info->is_3d == HDMI_VIDEO_FORMAT_3D) {
infoframe.type = HDMI_PACKET_TYPE_VSI;
infoframe.ver = HDMI_VSI_VERSION;
infoframe.len = HDMI_VSI_LENGTH;
hdmi_reg_infoframe(hdev, &infoframe);
} else
hdmi_reg_stop_vsi(hdev);
infoframe.type = HDMI_PACKET_TYPE_AVI;
infoframe.ver = HDMI_AVI_VERSION;
infoframe.len = HDMI_AVI_LENGTH;
hdmi_reg_infoframe(hdev, &infoframe);
return 0;
}
static int hdmi_set_packets(struct hdmi_device *hdev)
{
hdmi_reg_set_acr(hdev);
return 0;
}
static int hdmi_streamon(struct hdmi_device *hdev)
{
struct device *dev = hdev->dev;
struct hdmi_resources *res = &hdev->res;
int ret, tries;
dev_dbg(dev, "%s\n", __func__);
hdev->streaming = 1;
ret = v4l2_subdev_call(hdev->phy_sd, video, s_stream, 1);
if (ret)
return ret;
/* waiting for HDMIPHY's PLL to get to steady state */
for (tries = 100; tries; --tries) {
if (is_hdmiphy_ready(hdev))
break;
mdelay(1);
}
/* steady state not achieved */
if (tries == 0) {
dev_err(dev, "hdmiphy's pll could not reach steady state.\n");
v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
hdmi_dumpregs(hdev, "s_stream");
return -EIO;
}
/* hdmiphy clock is used for HDMI in streaming mode */
clk_disable(res->sclk_hdmi);
clk_set_parent(res->sclk_hdmi, res->sclk_hdmiphy);
clk_enable(res->sclk_hdmi);
/* 3D test */
hdmi_set_infoframe(hdev);
/* set packets for audio */
hdmi_set_packets(hdev);
/* init audio */
#if defined(CONFIG_VIDEO_EXYNOS_HDMI_AUDIO_I2S)
hdmi_reg_i2s_audio_init(hdev);
#elif defined(CONFIG_VIDEO_EXYNOS_HDMI_AUDIO_SPDIF)
hdmi_reg_spdif_audio_init(hdev);
#endif
/* enable HDMI audio */
if (hdev->audio_enable)
hdmi_audio_enable(hdev, 1);
hdmi_set_dvi_mode(hdev);
/* enable HDMI and timing generator */
hdmi_enable(hdev, 1);
hdmi_tg_enable(hdev, 1);
/* start HDCP if enabled */
if (hdev->hdcp_info.hdcp_enable) {
ret = hdcp_start(hdev);
if (ret)
return ret;
}
hdmi_dumpregs(hdev, "streamon");
return 0;
}
static int hdmi_streamoff(struct hdmi_device *hdev)
{
struct device *dev = hdev->dev;
struct hdmi_resources *res = &hdev->res;
dev_dbg(dev, "%s\n", __func__);
if (hdev->hdcp_info.hdcp_enable)
hdcp_stop(hdev);
hdmi_audio_enable(hdev, 0);
hdmi_enable(hdev, 0);
hdmi_tg_enable(hdev, 0);
/* pixel(vpll) clock is used for HDMI in config mode */
clk_disable(res->sclk_hdmi);
clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
clk_enable(res->sclk_hdmi);
v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
hdev->streaming = 0;
hdmi_dumpregs(hdev, "streamoff");
return 0;
}
static int hdmi_s_stream(struct v4l2_subdev *sd, int enable)
{
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
struct device *dev = hdev->dev;
dev_dbg(dev, "%s(%d)\n", __func__, enable);
if (enable)
return hdmi_streamon(hdev);
return hdmi_streamoff(hdev);
}
static void hdmi_resource_poweron(struct hdmi_resources *res)
{
/* power-on hdmi physical interface */
clk_enable(res->hdmiphy);
/* use VPP as parent clock; HDMIPHY is not working yet */
clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
/* turn clocks on */
clk_enable(res->sclk_hdmi);
}
static int hdmi_runtime_resume(struct device *dev);
static int hdmi_runtime_suspend(struct device *dev);
static int hdmi_s_power(struct v4l2_subdev *sd, int on)
{
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
/* If runtime PM is not implemented, hdmi_runtime_resume
* and hdmi_runtime_suspend functions are directly called.
*/
#ifdef CONFIG_PM_RUNTIME
int ret;
if (on) {
clk_enable(hdev->res.hdmi);
hdmi_hpd_enable(hdev, 1);
ret = pm_runtime_get_sync(hdev->dev);
set_internal_hpd_int(hdev);
} else {
hdmi_hpd_enable(hdev, 0);
set_external_hpd_int(hdev);
ret = pm_runtime_put_sync(hdev->dev);
clk_disable(hdev->res.hdmi);
}
/* only values < 0 indicate errors */
return IS_ERR_VALUE(ret) ? ret : 0;
#else
if (on) {
clk_enable(hdev->res.hdmi);
hdmi_hpd_enable(hdev, 1);
set_internal_hpd_int(hdev);
hdmi_runtime_resume(hdev->dev);
} else {
hdmi_hpd_enable(hdev, 0);
set_external_hpd_int(hdev);
clk_disable(hdev->res.hdmi);
hdmi_runtime_suspend(hdev->dev);
}
return 0;
#endif
}
int hdmi_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
struct device *dev = hdev->dev;
int ret = 0;
dev_dbg(dev, "%s start\n", __func__);
switch (ctrl->id) {
case V4L2_CID_TV_SET_DVI_MODE:
hdev->dvi_mode = ctrl->value;
break;
default:
dev_err(dev, "invalid control id\n");
ret = -EINVAL;
break;
}
return ret;
}
int hdmi_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
struct device *dev = hdev->dev;
unsigned long flags;
spin_lock_irqsave(&hdev->hpd_lock, flags);
if (!pm_runtime_suspended(hdev->dev) && !hdev->hpd_user_checked)
ctrl->value = hdmi_hpd_status(hdev);
else
ctrl->value = atomic_read(&hdev->hpd_state);
dev_dbg(dev, "HDMI cable is %s\n", ctrl->value ?
"connected" : "disconnected");
spin_unlock_irqrestore(&hdev->hpd_lock, flags);
return 0;
}
static int hdmi_s_dv_preset(struct v4l2_subdev *sd,
struct v4l2_dv_preset *preset)
{
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
struct device *dev = hdev->dev;
const struct hdmi_preset_conf *conf;
conf = hdmi_preset2conf(preset->preset);
if (conf == NULL) {
dev_err(dev, "preset (%u) not supported\n", preset->preset);
return -EINVAL;
}
hdev->cur_conf = conf;
hdev->cur_preset = preset->preset;
return 0;
}
static int hdmi_g_dv_preset(struct v4l2_subdev *sd,
struct v4l2_dv_preset *preset)
{
memset(preset, 0, sizeof(*preset));
preset->preset = sd_to_hdmi_dev(sd)->cur_preset;
return 0;
}
static int hdmi_g_mbus_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *fmt)
{
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
struct device *dev = hdev->dev;
dev_dbg(dev, "%s\n", __func__);
if (!hdev->cur_conf)
return -EINVAL;
*fmt = hdev->cur_conf->mbus_fmt;
return 0;
}
static int hdmi_s_mbus_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *fmt)
{
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
struct device *dev = hdev->dev;
dev_dbg(dev, "%s\n", __func__);
if (fmt->code == V4L2_MBUS_FMT_YUV8_1X24)
hdev->output_fmt = HDMI_OUTPUT_YUV444;
else
hdev->output_fmt = HDMI_OUTPUT_RGB888;
return 0;
}
static int hdmi_enum_dv_presets(struct v4l2_subdev *sd,
struct v4l2_dv_enum_preset *preset)
{
if (preset->index >= hdmi_pre_cnt)
return -EINVAL;
return v4l_fill_dv_preset_info(hdmi_conf[preset->index].preset, preset);
}
static const struct v4l2_subdev_core_ops hdmi_sd_core_ops = {
.s_power = hdmi_s_power,
.s_ctrl = hdmi_s_ctrl,
.g_ctrl = hdmi_g_ctrl,
};
static const struct v4l2_subdev_video_ops hdmi_sd_video_ops = {
.s_dv_preset = hdmi_s_dv_preset,
.g_dv_preset = hdmi_g_dv_preset,
.enum_dv_presets = hdmi_enum_dv_presets,
.g_mbus_fmt = hdmi_g_mbus_fmt,
.s_mbus_fmt = hdmi_s_mbus_fmt,
.s_stream = hdmi_s_stream,
};
static const struct v4l2_subdev_ops hdmi_sd_ops = {
.core = &hdmi_sd_core_ops,
.video = &hdmi_sd_video_ops,
};
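/*
* Editor's illustrative sketch (hedged, not part of the original driver):
* roughly how another bridge driver (e.g. the mixer) could drive this
* subdev through the ops registered above. The 'hdmi_sd' pointer and the
* chosen preset are hypothetical.
*/
#if 0
static int example_start_hdmi_output(struct v4l2_subdev *hdmi_sd)
{
struct v4l2_dv_preset preset = { .preset = V4L2_DV_1080P60 };
int ret;
ret = v4l2_subdev_call(hdmi_sd, core, s_power, 1);
if (ret)
return ret;
ret = v4l2_subdev_call(hdmi_sd, video, s_dv_preset, &preset);
if (ret)
return ret;
return v4l2_subdev_call(hdmi_sd, video, s_stream, 1);
}
#endif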
static int hdmi_runtime_suspend(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
struct hdmi_resources *res = &hdev->res;
dev_dbg(dev, "%s\n", __func__);
/* HDMI PHY off sequence
* LINK off -> PHY off -> HDMI_PHY_CONTROL disable */
/* turn clocks off */
clk_disable(res->sclk_hdmi);
v4l2_subdev_call(hdev->phy_sd, core, s_power, 0);
/* power-off hdmiphy */
clk_disable(res->hdmiphy);
return 0;
}
static int hdmi_runtime_resume(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
struct hdmi_resources *res = &hdev->res;
int ret = 0;
dev_dbg(dev, "%s\n", __func__);
hdmi_resource_poweron(&hdev->res);
hdmi_phy_sw_reset(hdev);
ret = v4l2_subdev_call(hdev->phy_sd, core, s_power, 1);
if (ret) {
dev_err(dev, "failed to turn on hdmiphy\n");
goto fail;
}
ret = hdmi_conf_apply(hdev);
if (ret)
goto fail;
dev_dbg(dev, "poweron succeed\n");
return 0;
fail:
clk_disable(res->sclk_hdmi);
v4l2_subdev_call(hdev->phy_sd, core, s_power, 0);
clk_disable(res->hdmiphy);
dev_err(dev, "poweron failed\n");
return ret;
}
static const struct dev_pm_ops hdmi_pm_ops = {
.runtime_suspend = hdmi_runtime_suspend,
.runtime_resume = hdmi_runtime_resume,
};
static void hdmi_resources_cleanup(struct hdmi_device *hdev)
{
struct hdmi_resources *res = &hdev->res;
dev_dbg(hdev->dev, "HDMI resource cleanup\n");
/* put clocks */
if (!IS_ERR_OR_NULL(res->hdmiphy))
clk_put(res->hdmiphy);
if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
clk_put(res->sclk_hdmiphy);
if (!IS_ERR_OR_NULL(res->sclk_pixel))
clk_put(res->sclk_pixel);
if (!IS_ERR_OR_NULL(res->sclk_hdmi))
clk_put(res->sclk_hdmi);
if (!IS_ERR_OR_NULL(res->hdmi))
clk_put(res->hdmi);
memset(res, 0, sizeof *res);
}
static int hdmi_resources_init(struct hdmi_device *hdev)
{
struct device *dev = hdev->dev;
struct hdmi_resources *res = &hdev->res;
dev_dbg(dev, "HDMI resource init\n");
memset(res, 0, sizeof *res);
/* get clocks, power */
res->hdmi = clk_get(dev, "hdmi");
if (IS_ERR_OR_NULL(res->hdmi)) {
dev_err(dev, "failed to get clock 'hdmi'\n");
goto fail;
}
res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
goto fail;
}
res->sclk_pixel = clk_get(dev, "sclk_pixel");
if (IS_ERR_OR_NULL(res->sclk_pixel)) {
dev_err(dev, "failed to get clock 'sclk_pixel'\n");
goto fail;
}
res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
dev_err(dev, "failed to get clock 'sclk_hdmiphy'\n");
goto fail;
}
res->hdmiphy = clk_get(dev, "hdmiphy");
if (IS_ERR_OR_NULL(res->hdmiphy)) {
dev_err(dev, "failed to get clock 'hdmiphy'\n");
goto fail;
}
return 0;
fail:
dev_err(dev, "HDMI resource init - failed\n");
hdmi_resources_cleanup(hdev);
return -ENODEV;
}
static int hdmi_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags)
{
return 0;
}
/* hdmi entity operations */
static const struct media_entity_operations hdmi_entity_ops = {
.link_setup = hdmi_link_setup,
};
static int hdmi_register_entity(struct hdmi_device *hdev)
{
struct v4l2_subdev *sd = &hdev->sd;
struct v4l2_device *v4l2_dev;
struct media_pad *pads = &hdev->pad;
struct media_entity *me = &sd->entity;
struct device *dev = hdev->dev;
struct exynos_md *md;
int ret;
dev_dbg(dev, "HDMI entity init\n");
/* init hdmi subdev */
v4l2_subdev_init(sd, &hdmi_sd_ops);
sd->owner = THIS_MODULE;
strlcpy(sd->name, "s5p-hdmi", sizeof(sd->name));
dev_set_drvdata(dev, sd);
/* init hdmi sub-device as entity */
pads[HDMI_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
me->ops = &hdmi_entity_ops;
ret = media_entity_init(me, HDMI_PADS_NUM, pads, 0);
if (ret) {
dev_err(dev, "failed to initialize media entity\n");
return ret;
}
/* get output media ptr for registering hdmi's sd */
md = (struct exynos_md *)module_name_to_driver_data(MDEV_MODULE_NAME);
if (!md) {
dev_err(dev, "failed to get output media device\n");
return -ENODEV;
}
v4l2_dev = &md->v4l2_dev;
/* register HDMI subdev as entity to the v4l2_dev pointer of the
* output media device
*/
ret = v4l2_device_register_subdev(v4l2_dev, sd);
if (ret) {
dev_err(dev, "failed to register HDMI subdev\n");
return ret;
}
return 0;
}
static void hdmi_entity_info_print(struct hdmi_device *hdev)
{
struct v4l2_subdev *sd = &hdev->sd;
struct media_entity *me = &sd->entity;
dev_dbg(hdev->dev, "\n************* HDMI entity info **************\n");
dev_dbg(hdev->dev, "[SUB DEVICE INFO]\n");
entity_info_print(me, hdev->dev);
dev_dbg(hdev->dev, "*********************************************\n\n");
}
static void s5p_hpd_kobject_uevent(struct work_struct *work)
{
struct hdmi_device *hdev = container_of(work, struct hdmi_device,
hpd_work);
char *disconnected[2] = { "HDMI_STATE=offline", NULL };
char *connected[2] = { "HDMI_STATE=online", NULL };
char **envp = NULL;
int state = atomic_read(&hdev->hpd_state);
/* irq setting by TV power on/off status */
if (!pm_runtime_suspended(hdev->dev))
set_internal_hpd_int(hdev);
else
set_external_hpd_int(hdev);
if (state)
envp = connected;
else
envp = disconnected;
hdev->hpd_user_checked = true;
kobject_uevent_env(&hdev->dev->kobj, KOBJ_CHANGE, envp);
pr_info("%s: sent uevent %s\n", __func__, envp[0]);
}
static int __devinit hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct i2c_adapter *phy_adapter;
struct hdmi_device *hdmi_dev = NULL;
struct hdmi_driver_data *drv_data;
int ret;
unsigned int irq_type;
dev_dbg(dev, "probe start\n");
hdmi_dev = kzalloc(sizeof(*hdmi_dev), GFP_KERNEL);
if (!hdmi_dev) {
dev_err(dev, "out of memory\n");
ret = -ENOMEM;
goto fail;
}
hdmi_dev->dev = dev;
ret = hdmi_resources_init(hdmi_dev);
if (ret)
goto fail_hdev;
/* mapping HDMI registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
ret = -ENXIO;
goto fail_init;
}
hdmi_dev->regs = ioremap(res->start, resource_size(res));
if (hdmi_dev->regs == NULL) {
dev_err(dev, "register mapping failed.\n");
ret = -ENXIO;
goto fail_init;
}
/* External hpd */
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(dev, "get external interrupt resource failed.\n");
ret = -ENXIO;
goto fail_regs;
}
hdmi_dev->ext_irq = res->start;
/* Internal hpd */
res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
if (res == NULL) {
dev_err(dev, "get internal interrupt resource failed.\n");
ret = -ENXIO;
goto fail_regs;
}
hdmi_dev->int_irq = res->start;
/* workqueue for HPD */
hdmi_dev->hpd_wq = create_workqueue("hdmi-hpd");
if (hdmi_dev->hpd_wq == NULL) {
ret = -ENXIO;
goto fail_regs;
}
INIT_WORK(&hdmi_dev->hpd_work, s5p_hpd_kobject_uevent);
/* setting v4l2 name to prevent WARN_ON in v4l2_device_register */
strlcpy(hdmi_dev->v4l2_dev.name, dev_name(dev),
sizeof(hdmi_dev->v4l2_dev.name));
/* passing NULL owner prevents driver from erasing drvdata */
ret = v4l2_device_register(NULL, &hdmi_dev->v4l2_dev);
if (ret) {
dev_err(dev, "could not register v4l2 device.\n");
goto fail_regs;
}
drv_data = (struct hdmi_driver_data *)
platform_get_device_id(pdev)->driver_data;
dev_info(dev, "hdmiphy i2c bus number = %d\n", drv_data->hdmiphy_bus);
phy_adapter = i2c_get_adapter(drv_data->hdmiphy_bus);
if (phy_adapter == NULL) {
dev_err(dev, "adapter request failed\n");
ret = -ENXIO;
goto fail_vdev;
}
hdmi_dev->phy_sd = v4l2_i2c_new_subdev_board(&hdmi_dev->v4l2_dev,
phy_adapter, &hdmiphy_info, NULL);
/* on failure or not, the adapter is no longer useful */
i2c_put_adapter(phy_adapter);
if (hdmi_dev->phy_sd == NULL) {
dev_err(dev, "missing subdev for hdmiphy\n");
ret = -ENODEV;
goto fail_vdev;
}
/* HDMI PHY power off
* The HDMI PHY is on by default,
* so it must be turned off here while it is not in use */
clk_enable(hdmi_dev->res.hdmiphy);
v4l2_subdev_call(hdmi_dev->phy_sd, core, s_power, 0);
clk_disable(hdmi_dev->res.hdmiphy);
pm_runtime_enable(dev);
/* irq setting by TV power on/off status */
if (!pm_runtime_suspended(hdmi_dev->dev)) {
hdmi_dev->curr_irq = hdmi_dev->int_irq;
irq_type = 0;
s5p_v4l2_int_src_hdmi_hpd();
} else {
if (s5p_v4l2_hpd_read_gpio())
atomic_set(&hdmi_dev->hpd_state, HPD_HIGH);
else
atomic_set(&hdmi_dev->hpd_state, HPD_LOW);
hdmi_dev->curr_irq = hdmi_dev->ext_irq;
irq_type = IRQ_TYPE_EDGE_BOTH;
s5p_v4l2_int_src_ext_hpd();
}
hdmi_dev->hpd_user_checked = false;
ret = request_irq(hdmi_dev->curr_irq, hdmi_irq_handler,
irq_type, "hdmi", hdmi_dev);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
goto fail_vdev;
}
hdmi_dev->cur_preset = HDMI_DEFAULT_PRESET;
/* FIXME: error handling is missing when the preset is not supported */
hdmi_dev->cur_conf = hdmi_preset2conf(hdmi_dev->cur_preset);
/* default audio configuration : enable audio */
hdmi_dev->audio_enable = 1;
hdmi_dev->sample_rate = DEFAULT_SAMPLE_RATE;
hdmi_dev->bits_per_sample = DEFAULT_BITS_PER_SAMPLE;
hdmi_dev->audio_codec = DEFAULT_AUDIO_CODEC;
/* register hdmi subdev as entity */
ret = hdmi_register_entity(hdmi_dev);
if (ret)
goto fail_irq;
hdmi_entity_info_print(hdmi_dev);
/* initialize hdcp resource */
ret = hdcp_prepare(hdmi_dev);
if (ret)
goto fail_irq;
dev_info(dev, "probe sucessful\n");
return 0;
fail_irq:
free_irq(hdmi_dev->curr_irq, hdmi_dev);
fail_vdev:
v4l2_device_unregister(&hdmi_dev->v4l2_dev);
fail_regs:
iounmap(hdmi_dev->regs);
fail_init:
hdmi_resources_cleanup(hdmi_dev);
fail_hdev:
kfree(hdmi_dev);
fail:
dev_err(dev, "probe failed\n");
return ret;
}
static int __devexit hdmi_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct hdmi_device *hdmi_dev = sd_to_hdmi_dev(sd);
pm_runtime_disable(dev);
clk_disable(hdmi_dev->res.hdmi);
v4l2_device_unregister(&hdmi_dev->v4l2_dev);
disable_irq(hdmi_dev->curr_irq);
free_irq(hdmi_dev->curr_irq, hdmi_dev);
iounmap(hdmi_dev->regs);
hdmi_resources_cleanup(hdmi_dev);
flush_workqueue(hdmi_dev->hdcp_wq);
destroy_workqueue(hdmi_dev->hdcp_wq);
kfree(hdmi_dev);
dev_info(dev, "remove sucessful\n");
return 0;
}
static struct platform_driver hdmi_driver __refdata = {
.probe = hdmi_probe,
.remove = __devexit_p(hdmi_remove),
.id_table = hdmi_driver_types,
.driver = {
.name = "s5p-hdmi",
.owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
}
};
/* D R I V E R I N I T I A L I Z A T I O N */
static int __init hdmi_init(void)
{
int ret;
static const char banner[] __initdata = KERN_INFO \
"Samsung HDMI output driver, "
"(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
printk(banner);
ret = platform_driver_register(&hdmi_driver);
if (ret)
printk(KERN_ERR "HDMI platform driver register failed\n");
return ret;
}
module_init(hdmi_init);
static void __exit hdmi_exit(void)
{
platform_driver_unregister(&hdmi_driver);
}
module_exit(hdmi_exit);
| gpl-2.0 |
ziozzang/kernel-rhel6 | net/ipv4/netfilter.c | 466 | 6703 | /* IPv4 specific functions of netfilter core */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netfilter/nf_queue.h>
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
{
struct net *net = dev_net(skb_dst(skb)->dev);
const struct iphdr *iph = ip_hdr(skb);
struct rtable *rt;
struct flowi fl = {};
struct dst_entry *odst;
unsigned int hh_len;
unsigned int type;
type = inet_addr_type(net, iph->saddr);
if (skb->sk && inet_sk(skb->sk)->transparent)
type = RTN_LOCAL;
if (addr_type == RTN_UNSPEC)
addr_type = type;
/* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
* packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
*/
if (addr_type == RTN_LOCAL) {
fl.nl_u.ip4_u.daddr = iph->daddr;
if (type == RTN_LOCAL)
fl.nl_u.ip4_u.saddr = iph->saddr;
fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
fl.mark = skb->mark;
fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
if (ip_route_output_key(net, &rt, &fl) != 0)
return -1;
/* Drop old route. */
skb_dst_drop(skb);
skb_dst_set(skb, &rt->u.dst);
} else {
/* non-local src, find valid iif to satisfy
* rp-filter when calling ip_route_input. */
fl.nl_u.ip4_u.daddr = iph->saddr;
if (ip_route_output_key(net, &rt, &fl) != 0)
return -1;
odst = skb_dst(skb);
if (ip_route_input(skb, iph->daddr, iph->saddr,
RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
dst_release(&rt->u.dst);
return -1;
}
dst_release(&rt->u.dst);
dst_release(odst);
}
if (skb_dst(skb)->error)
return -1;
#ifdef CONFIG_XFRM
if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
xfrm_decode_session(skb, &fl, AF_INET) == 0) {
struct dst_entry *dst = skb_dst(skb);
skb_dst_set(skb, NULL);
if (xfrm_lookup(net, &dst, &fl, skb->sk, 0))
return -1;
skb_dst_set(skb, dst);
}
#endif
/* Change in oif may mean change in hh_len. */
hh_len = skb_dst(skb)->dev->hard_header_len;
if (skb_headroom(skb) < hh_len &&
pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
return -1;
return 0;
}
EXPORT_SYMBOL(ip_route_me_harder);
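/*
* Editor's illustrative sketch (hedged, not part of this file): a
* mangle/NAT-style target that rewrites the destination address on
* LOCAL_OUT has to re-run routing, which is what ip_route_me_harder()
* is for. The helper name and the new address are hypothetical, and the
* IP/L4 checksum fix-up is omitted for brevity.
*/
#if 0
static unsigned int example_rewrite_daddr(struct sk_buff *skb, __be32 new_daddr)
{
struct iphdr *iph = ip_hdr(skb);
iph->daddr = new_daddr;
if (ip_route_me_harder(skb, RTN_UNSPEC))
return NF_DROP;
return NF_ACCEPT;
}
#endif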
#ifdef CONFIG_XFRM
int ip_xfrm_me_harder(struct sk_buff *skb)
{
struct flowi fl;
unsigned int hh_len;
struct dst_entry *dst;
if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
return 0;
if (xfrm_decode_session(skb, &fl, AF_INET) < 0)
return -1;
dst = skb_dst(skb);
if (dst->xfrm)
dst = ((struct xfrm_dst *)dst)->route;
dst_hold(dst);
if (xfrm_lookup(dev_net(dst->dev), &dst, &fl, skb->sk, 0) < 0)
return -1;
skb_dst_drop(skb);
skb_dst_set(skb, dst);
/* Change in oif may mean change in hh_len. */
hh_len = skb_dst(skb)->dev->hard_header_len;
if (skb_headroom(skb) < hh_len &&
pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
return -1;
return 0;
}
EXPORT_SYMBOL(ip_xfrm_me_harder);
#endif
void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(ip_nat_decode_session);
/*
* Extra routing may be needed on local out, as the QUEUE target never
* returns control to the table.
*/
struct ip_rt_info {
__be32 daddr;
__be32 saddr;
u_int8_t tos;
u_int32_t mark;
};
static void nf_ip_saveroute(const struct sk_buff *skb,
struct nf_queue_entry *entry)
{
struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
if (entry->hook == NF_INET_LOCAL_OUT) {
const struct iphdr *iph = ip_hdr(skb);
rt_info->tos = iph->tos;
rt_info->daddr = iph->daddr;
rt_info->saddr = iph->saddr;
rt_info->mark = skb->mark;
}
}
static int nf_ip_reroute(struct sk_buff *skb,
const struct nf_queue_entry *entry)
{
const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
if (entry->hook == NF_INET_LOCAL_OUT) {
const struct iphdr *iph = ip_hdr(skb);
if (!(iph->tos == rt_info->tos
&& skb->mark == rt_info->mark
&& iph->daddr == rt_info->daddr
&& iph->saddr == rt_info->saddr))
return ip_route_me_harder(skb, RTN_UNSPEC);
}
return 0;
}
__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol)
{
const struct iphdr *iph = ip_hdr(skb);
__sum16 csum = 0;
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
break;
if ((protocol == 0 && !csum_fold(skb->csum)) ||
!csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - dataoff, protocol,
skb->csum)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
}
/* fall through */
case CHECKSUM_NONE:
if (protocol == 0)
skb->csum = 0;
else
skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
skb->len - dataoff,
protocol, 0);
csum = __skb_checksum_complete(skb);
}
return csum;
}
EXPORT_SYMBOL(nf_ip_checksum);
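/*
* Editor's illustrative sketch (hedged, not part of this file): how a
* protocol helper such as a conntrack module might validate a TCP
* checksum with nf_ip_checksum(). The function name is hypothetical;
* nf_ip_checksum() returns 0 when the checksum is valid (or was already
* verified by hardware).
*/
#if 0
static bool example_tcp_csum_ok(struct sk_buff *skb, unsigned int hooknum)
{
unsigned int dataoff = ip_hdrlen(skb); /* start of the TCP header */
return nf_ip_checksum(skb, hooknum, dataoff, IPPROTO_TCP) == 0;
}
#endif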
static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
u_int8_t protocol)
{
const struct iphdr *iph = ip_hdr(skb);
__sum16 csum = 0;
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (len == skb->len - dataoff)
return nf_ip_checksum(skb, hook, dataoff, protocol);
/* fall through */
case CHECKSUM_NONE:
skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
skb->len - dataoff, 0);
skb->ip_summed = CHECKSUM_NONE;
csum = __skb_checksum_complete_head(skb, dataoff + len);
if (!csum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
return csum;
}
static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
{
return ip_route_output_key(&init_net, (struct rtable **)dst, fl);
}
static const struct nf_afinfo nf_ip_afinfo = {
.family = AF_INET,
.checksum = nf_ip_checksum,
.checksum_partial = nf_ip_checksum_partial,
.route = nf_ip_route,
.saveroute = nf_ip_saveroute,
.reroute = nf_ip_reroute,
.route_key_size = sizeof(struct ip_rt_info),
};
static int ipv4_netfilter_init(void)
{
return nf_register_afinfo(&nf_ip_afinfo);
}
static void ipv4_netfilter_fini(void)
{
nf_unregister_afinfo(&nf_ip_afinfo);
}
module_init(ipv4_netfilter_init);
module_exit(ipv4_netfilter_fini);
#ifdef CONFIG_SYSCTL
struct ctl_path nf_net_ipv4_netfilter_sysctl_path[] = {
{ .procname = "net", .ctl_name = CTL_NET, },
{ .procname = "ipv4", .ctl_name = NET_IPV4, },
{ .procname = "netfilter", .ctl_name = NET_IPV4_NETFILTER, },
{ }
};
EXPORT_SYMBOL_GPL(nf_net_ipv4_netfilter_sysctl_path);
#endif /* CONFIG_SYSCTL */
| gpl-2.0 |
priyatransbit/linux | drivers/input/serio/serport.c | 722 | 7082 | /*
* Input device TTY line discipline
*
* Copyright (c) 1999-2002 Vojtech Pavlik
*
* This is a module that converts a tty line into a much simpler
* 'serial io port' abstraction that the input device drivers use.
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <asm/uaccess.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/serio.h>
#include <linux/tty.h>
#include <linux/compat.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Input device TTY line discipline");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_MOUSE);
#define SERPORT_BUSY 1
#define SERPORT_ACTIVE 2
#define SERPORT_DEAD 3
struct serport {
struct tty_struct *tty;
wait_queue_head_t wait;
struct serio *serio;
struct serio_device_id id;
spinlock_t lock;
unsigned long flags;
};
/*
* Callback functions from the serio code.
*/
static int serport_serio_write(struct serio *serio, unsigned char data)
{
struct serport *serport = serio->port_data;
return -(serport->tty->ops->write(serport->tty, &data, 1) != 1);
}
static int serport_serio_open(struct serio *serio)
{
struct serport *serport = serio->port_data;
unsigned long flags;
spin_lock_irqsave(&serport->lock, flags);
set_bit(SERPORT_ACTIVE, &serport->flags);
spin_unlock_irqrestore(&serport->lock, flags);
return 0;
}
static void serport_serio_close(struct serio *serio)
{
struct serport *serport = serio->port_data;
unsigned long flags;
spin_lock_irqsave(&serport->lock, flags);
clear_bit(SERPORT_ACTIVE, &serport->flags);
set_bit(SERPORT_DEAD, &serport->flags);
spin_unlock_irqrestore(&serport->lock, flags);
wake_up_interruptible(&serport->wait);
}
/*
* serport_ldisc_open() is the routine that is called upon setting our line
* discipline on a tty. It prepares the serio struct.
*/
static int serport_ldisc_open(struct tty_struct *tty)
{
struct serport *serport;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
serport = kzalloc(sizeof(struct serport), GFP_KERNEL);
if (!serport)
return -ENOMEM;
serport->tty = tty;
spin_lock_init(&serport->lock);
init_waitqueue_head(&serport->wait);
tty->disc_data = serport;
tty->receive_room = 256;
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
return 0;
}
/*
* serport_ldisc_close() is the opposite of serport_ldisc_open()
*/
static void serport_ldisc_close(struct tty_struct *tty)
{
struct serport *serport = (struct serport *) tty->disc_data;
kfree(serport);
}
/*
* serport_ldisc_receive() is called by the low level tty driver when characters
* are ready for us. We forward the characters and flags, one by one to the
* 'interrupt' routine.
*/
static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
{
struct serport *serport = (struct serport*) tty->disc_data;
unsigned long flags;
unsigned int ch_flags = 0;
int i;
spin_lock_irqsave(&serport->lock, flags);
if (!test_bit(SERPORT_ACTIVE, &serport->flags))
goto out;
for (i = 0; i < count; i++) {
if (fp) {
switch (fp[i]) {
case TTY_FRAME:
ch_flags = SERIO_FRAME;
break;
case TTY_PARITY:
ch_flags = SERIO_PARITY;
break;
default:
ch_flags = 0;
break;
}
}
serio_interrupt(serport->serio, cp[i], ch_flags);
}
out:
spin_unlock_irqrestore(&serport->lock, flags);
}
/*
* serport_ldisc_read() just waits indefinitely if everything goes well.
* However, when the serio driver closes the serio port, it finishes,
* returning 0 characters.
*/
static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, unsigned char __user * buf, size_t nr)
{
struct serport *serport = (struct serport*) tty->disc_data;
struct serio *serio;
if (test_and_set_bit(SERPORT_BUSY, &serport->flags))
return -EBUSY;
serport->serio = serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!serio)
return -ENOMEM;
strlcpy(serio->name, "Serial port", sizeof(serio->name));
snprintf(serio->phys, sizeof(serio->phys), "%s/serio0", tty_name(tty));
serio->id = serport->id;
serio->id.type = SERIO_RS232;
serio->write = serport_serio_write;
serio->open = serport_serio_open;
serio->close = serport_serio_close;
serio->port_data = serport;
serio->dev.parent = tty->dev;
serio_register_port(serport->serio);
printk(KERN_INFO "serio: Serial port %s\n", tty_name(tty));
wait_event_interruptible(serport->wait, test_bit(SERPORT_DEAD, &serport->flags));
serio_unregister_port(serport->serio);
serport->serio = NULL;
clear_bit(SERPORT_DEAD, &serport->flags);
clear_bit(SERPORT_BUSY, &serport->flags);
return 0;
}
static void serport_set_type(struct tty_struct *tty, unsigned long type)
{
struct serport *serport = tty->disc_data;
serport->id.proto = type & 0x000000ff;
serport->id.id = (type & 0x0000ff00) >> 8;
serport->id.extra = (type & 0x00ff0000) >> 16;
}
/*
* serport_ldisc_ioctl() allows setting the port protocol and device ID
* (a hedged userspace sketch follows this function).
*/
static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
if (cmd == SPIOCSTYPE) {
unsigned long type;
if (get_user(type, (unsigned long __user *) arg))
return -EFAULT;
serport_set_type(tty, type);
return 0;
}
return -EINVAL;
}
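/*
* Editor's hypothetical userspace sketch (hedged, not part of this
* module): roughly how a tool such as inputattach programs the port type
* after setting the line discipline on a serial device. The descriptor
* and the chosen protocol are illustrative only.
*/
#if 0
#include <sys/ioctl.h>
#include <linux/serio.h>
static int example_set_serio_type(int tty_fd)
{
/* bits 0-7: protocol, bits 8-15: id, bits 16-23: extra */
unsigned long type = SERIO_SUNKBD | (0x00 << 8) | (0x00 << 16);
return ioctl(tty_fd, SPIOCSTYPE, &type);
}
#endif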
#ifdef CONFIG_COMPAT
#define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t)
static long serport_ldisc_compat_ioctl(struct tty_struct *tty,
struct file *file,
unsigned int cmd, unsigned long arg)
{
if (cmd == COMPAT_SPIOCSTYPE) {
void __user *uarg = compat_ptr(arg);
compat_ulong_t compat_type;
if (get_user(compat_type, (compat_ulong_t __user *)uarg))
return -EFAULT;
serport_set_type(tty, compat_type);
return 0;
}
return -EINVAL;
}
#endif
static void serport_ldisc_write_wakeup(struct tty_struct * tty)
{
struct serport *serport = (struct serport *) tty->disc_data;
unsigned long flags;
spin_lock_irqsave(&serport->lock, flags);
if (test_bit(SERPORT_ACTIVE, &serport->flags))
serio_drv_write_wakeup(serport->serio);
spin_unlock_irqrestore(&serport->lock, flags);
}
/*
* The line discipline structure.
*/
static struct tty_ldisc_ops serport_ldisc = {
.owner = THIS_MODULE,
.name = "input",
.open = serport_ldisc_open,
.close = serport_ldisc_close,
.read = serport_ldisc_read,
.ioctl = serport_ldisc_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = serport_ldisc_compat_ioctl,
#endif
.receive_buf = serport_ldisc_receive,
.write_wakeup = serport_ldisc_write_wakeup
};
/*
* The functions for inserting/removing us as a module.
*/
static int __init serport_init(void)
{
int retval;
retval = tty_register_ldisc(N_MOUSE, &serport_ldisc);
if (retval)
printk(KERN_ERR "serport.c: Error registering line discipline.\n");
return retval;
}
static void __exit serport_exit(void)
{
tty_unregister_ldisc(N_MOUSE);
}
module_init(serport_init);
module_exit(serport_exit);
| gpl-2.0 |
Evervolv/android_kernel_htc_msm8660 | arch/arm/mach-exynos4/mach-smdkc210.c | 2002 | 6080 | /* linux/arch/arm/mach-exynos4/mach-smdkc210.c
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/serial_core.h>
#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/smsc911x.h>
#include <linux/io.h>
#include <linux/i2c.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
#include <plat/regs-srom.h>
#include <plat/exynos4.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/sdhci.h>
#include <plat/iic.h>
#include <plat/pd.h>
#include <mach/map.h>
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDKC210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
S3C2410_UCON_TXIRQMODE | \
S3C2410_UCON_RXIRQMODE | \
S3C2410_UCON_RXFIFO_TOI | \
S3C2443_UCON_RXERR_IRQEN)
#define SMDKC210_ULCON_DEFAULT S3C2410_LCON_CS8
#define SMDKC210_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
S5PV210_UFCON_TXTRIG4 | \
S5PV210_UFCON_RXTRIG4)
static struct s3c2410_uartcfg smdkc210_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
.flags = 0,
.ucon = SMDKC210_UCON_DEFAULT,
.ulcon = SMDKC210_ULCON_DEFAULT,
.ufcon = SMDKC210_UFCON_DEFAULT,
},
[1] = {
.hwport = 1,
.flags = 0,
.ucon = SMDKC210_UCON_DEFAULT,
.ulcon = SMDKC210_ULCON_DEFAULT,
.ufcon = SMDKC210_UFCON_DEFAULT,
},
[2] = {
.hwport = 2,
.flags = 0,
.ucon = SMDKC210_UCON_DEFAULT,
.ulcon = SMDKC210_ULCON_DEFAULT,
.ufcon = SMDKC210_UFCON_DEFAULT,
},
[3] = {
.hwport = 3,
.flags = 0,
.ucon = SMDKC210_UCON_DEFAULT,
.ulcon = SMDKC210_ULCON_DEFAULT,
.ufcon = SMDKC210_UFCON_DEFAULT,
},
};
static struct s3c_sdhci_platdata smdkc210_hsmmc0_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_GPIO,
.ext_cd_gpio = EXYNOS4_GPK0(2),
.ext_cd_gpio_invert = 1,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
#ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT
.max_width = 8,
.host_caps = MMC_CAP_8_BIT_DATA,
#endif
};
static struct s3c_sdhci_platdata smdkc210_hsmmc1_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_GPIO,
.ext_cd_gpio = EXYNOS4_GPK0(2),
.ext_cd_gpio_invert = 1,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
};
static struct s3c_sdhci_platdata smdkc210_hsmmc2_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_GPIO,
.ext_cd_gpio = EXYNOS4_GPK2(2),
.ext_cd_gpio_invert = 1,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
#ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT
.max_width = 8,
.host_caps = MMC_CAP_8_BIT_DATA,
#endif
};
static struct s3c_sdhci_platdata smdkc210_hsmmc3_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_GPIO,
.ext_cd_gpio = EXYNOS4_GPK2(2),
.ext_cd_gpio_invert = 1,
.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
};
static struct resource smdkc210_smsc911x_resources[] = {
[0] = {
.start = EXYNOS4_PA_SROM_BANK(1),
.end = EXYNOS4_PA_SROM_BANK(1) + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_EINT(5),
.end = IRQ_EINT(5),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
},
};
static struct smsc911x_platform_config smsc9215_config = {
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
.flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY,
.phy_interface = PHY_INTERFACE_MODE_MII,
.mac = {0x00, 0x80, 0x00, 0x23, 0x45, 0x67},
};
static struct platform_device smdkc210_smsc911x = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(smdkc210_smsc911x_resources),
.resource = smdkc210_smsc911x_resources,
.dev = {
.platform_data = &smsc9215_config,
},
};
static struct i2c_board_info i2c_devs1[] __initdata = {
{I2C_BOARD_INFO("wm8994", 0x1a),},
};
static struct platform_device *smdkc210_devices[] __initdata = {
&s3c_device_hsmmc0,
&s3c_device_hsmmc1,
&s3c_device_hsmmc2,
&s3c_device_hsmmc3,
&s3c_device_i2c1,
&s3c_device_rtc,
&s3c_device_wdt,
&exynos4_device_ac97,
&exynos4_device_i2s0,
&exynos4_device_pd[PD_MFC],
&exynos4_device_pd[PD_G3D],
&exynos4_device_pd[PD_LCD0],
&exynos4_device_pd[PD_LCD1],
&exynos4_device_pd[PD_CAM],
&exynos4_device_pd[PD_TV],
&exynos4_device_pd[PD_GPS],
&exynos4_device_sysmmu,
&samsung_asoc_dma,
&smdkc210_smsc911x,
};
static void __init smdkc210_smsc911x_init(void)
{
u32 cs1;
/* configure nCS1 width to 16 bits */
cs1 = __raw_readl(S5P_SROM_BW) &
~(S5P_SROM_BW__CS_MASK << S5P_SROM_BW__NCS1__SHIFT);
cs1 |= ((1 << S5P_SROM_BW__DATAWIDTH__SHIFT) |
(1 << S5P_SROM_BW__WAITENABLE__SHIFT) |
(1 << S5P_SROM_BW__BYTEENABLE__SHIFT)) <<
S5P_SROM_BW__NCS1__SHIFT;
__raw_writel(cs1, S5P_SROM_BW);
/* set timing for nCS1 suitable for ethernet chip */
__raw_writel((0x1 << S5P_SROM_BCX__PMC__SHIFT) |
(0x9 << S5P_SROM_BCX__TACP__SHIFT) |
(0xc << S5P_SROM_BCX__TCAH__SHIFT) |
(0x1 << S5P_SROM_BCX__TCOH__SHIFT) |
(0x6 << S5P_SROM_BCX__TACC__SHIFT) |
(0x1 << S5P_SROM_BCX__TCOS__SHIFT) |
(0x1 << S5P_SROM_BCX__TACS__SHIFT), S5P_SROM_BC1);
}
static void __init smdkc210_map_io(void)
{
s5p_init_io(NULL, 0, S5P_VA_CHIPID);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(smdkc210_uartcfgs, ARRAY_SIZE(smdkc210_uartcfgs));
}
static void __init smdkc210_machine_init(void)
{
s3c_i2c1_set_platdata(NULL);
i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1));
smdkc210_smsc911x_init();
s3c_sdhci0_set_platdata(&smdkc210_hsmmc0_pdata);
s3c_sdhci1_set_platdata(&smdkc210_hsmmc1_pdata);
s3c_sdhci2_set_platdata(&smdkc210_hsmmc2_pdata);
s3c_sdhci3_set_platdata(&smdkc210_hsmmc3_pdata);
platform_add_devices(smdkc210_devices, ARRAY_SIZE(smdkc210_devices));
}
MACHINE_START(SMDKC210, "SMDKC210")
/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
.boot_params = S5P_PA_SDRAM + 0x100,
.init_irq = exynos4_init_irq,
.map_io = smdkc210_map_io,
.init_machine = smdkc210_machine_init,
.timer = &exynos4_timer,
MACHINE_END
| gpl-2.0 |
kozmikkick/kozmikvigor | drivers/mfd/max8997.c | 2002 | 10842 | /*
* max8997.c - mfd core driver for the Maxim 8966 and 8997
*
* Copyright (C) 2011 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* This driver is based on max8998.c
*/
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max8997.h>
#include <linux/mfd/max8997-private.h>
#define I2C_ADDR_PMIC (0xCC >> 1)
#define I2C_ADDR_MUIC (0x4A >> 1)
#define I2C_ADDR_BATTERY (0x6C >> 1)
#define I2C_ADDR_RTC (0x0C >> 1)
#define I2C_ADDR_HAPTIC (0x90 >> 1)
static struct mfd_cell max8997_devs[] = {
{ .name = "max8997-pmic", },
{ .name = "max8997-rtc", },
{ .name = "max8997-battery", },
{ .name = "max8997-haptic", },
{ .name = "max8997-muic", },
{ .name = "max8997-flash", },
};
int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
{
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&max8997->iolock);
ret = i2c_smbus_read_byte_data(i2c, reg);
mutex_unlock(&max8997->iolock);
if (ret < 0)
return ret;
ret &= 0xff;
*dest = ret;
return 0;
}
EXPORT_SYMBOL_GPL(max8997_read_reg);
int max8997_bulk_read(struct i2c_client *i2c, u8 reg, int count, u8 *buf)
{
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&max8997->iolock);
ret = i2c_smbus_read_i2c_block_data(i2c, reg, count, buf);
mutex_unlock(&max8997->iolock);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(max8997_bulk_read);
int max8997_write_reg(struct i2c_client *i2c, u8 reg, u8 value)
{
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&max8997->iolock);
ret = i2c_smbus_write_byte_data(i2c, reg, value);
mutex_unlock(&max8997->iolock);
return ret;
}
EXPORT_SYMBOL_GPL(max8997_write_reg);
int max8997_bulk_write(struct i2c_client *i2c, u8 reg, int count, u8 *buf)
{
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&max8997->iolock);
ret = i2c_smbus_write_i2c_block_data(i2c, reg, count, buf);
mutex_unlock(&max8997->iolock);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(max8997_bulk_write);
int max8997_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask)
{
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
int ret;
mutex_lock(&max8997->iolock);
ret = i2c_smbus_read_byte_data(i2c, reg);
if (ret >= 0) {
u8 old_val = ret & 0xff;
u8 new_val = (val & mask) | (old_val & (~mask));
ret = i2c_smbus_write_byte_data(i2c, reg, new_val);
}
mutex_unlock(&max8997->iolock);
return ret;
}
EXPORT_SYMBOL_GPL(max8997_update_reg);
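/*
* Editor's illustrative sketch (hedged, not part of this file): a typical
* use of the read-modify-write helper above from a cell driver. The
* register choice and the assumption that bit 0 is an enable bit are
* hypothetical.
*/
#if 0
static int example_set_buck1_enable(struct max8997_dev *max8997, bool on)
{
/* update only bit 0 and leave the rest of the register untouched */
return max8997_update_reg(max8997->i2c, MAX8997_REG_BUCK1CTRL,
on ? 0x1 : 0x0, 0x1);
}
#endif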
static int max8997_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct max8997_dev *max8997;
struct max8997_platform_data *pdata = i2c->dev.platform_data;
int ret = 0;
max8997 = kzalloc(sizeof(struct max8997_dev), GFP_KERNEL);
if (max8997 == NULL)
return -ENOMEM;
i2c_set_clientdata(i2c, max8997);
max8997->dev = &i2c->dev;
max8997->i2c = i2c;
max8997->type = id->driver_data;
if (!pdata)
goto err;
max8997->wakeup = pdata->wakeup;
mutex_init(&max8997->iolock);
max8997->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
i2c_set_clientdata(max8997->rtc, max8997);
max8997->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
i2c_set_clientdata(max8997->haptic, max8997);
max8997->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
i2c_set_clientdata(max8997->muic, max8997);
pm_runtime_set_active(max8997->dev);
ret = mfd_add_devices(max8997->dev, -1, max8997_devs,
ARRAY_SIZE(max8997_devs),
NULL, 0);
/*
* TODO: enable others (flash, muic, rtc, battery, ...) and
* check the return value
*/
if (ret < 0)
goto err_mfd;
return ret;
err_mfd:
mfd_remove_devices(max8997->dev);
i2c_unregister_device(max8997->muic);
i2c_unregister_device(max8997->haptic);
i2c_unregister_device(max8997->rtc);
err:
kfree(max8997);
return ret;
}
static int max8997_i2c_remove(struct i2c_client *i2c)
{
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
mfd_remove_devices(max8997->dev);
i2c_unregister_device(max8997->muic);
i2c_unregister_device(max8997->haptic);
i2c_unregister_device(max8997->rtc);
kfree(max8997);
return 0;
}
static const struct i2c_device_id max8997_i2c_id[] = {
{ "max8997", TYPE_MAX8997 },
{ "max8966", TYPE_MAX8966 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max8997_i2c_id);
u8 max8997_dumpaddr_pmic[] = {
MAX8997_REG_INT1MSK,
MAX8997_REG_INT2MSK,
MAX8997_REG_INT3MSK,
MAX8997_REG_INT4MSK,
MAX8997_REG_MAINCON1,
MAX8997_REG_MAINCON2,
MAX8997_REG_BUCKRAMP,
MAX8997_REG_BUCK1CTRL,
MAX8997_REG_BUCK1DVS1,
MAX8997_REG_BUCK1DVS2,
MAX8997_REG_BUCK1DVS3,
MAX8997_REG_BUCK1DVS4,
MAX8997_REG_BUCK1DVS5,
MAX8997_REG_BUCK1DVS6,
MAX8997_REG_BUCK1DVS7,
MAX8997_REG_BUCK1DVS8,
MAX8997_REG_BUCK2CTRL,
MAX8997_REG_BUCK2DVS1,
MAX8997_REG_BUCK2DVS2,
MAX8997_REG_BUCK2DVS3,
MAX8997_REG_BUCK2DVS4,
MAX8997_REG_BUCK2DVS5,
MAX8997_REG_BUCK2DVS6,
MAX8997_REG_BUCK2DVS7,
MAX8997_REG_BUCK2DVS8,
MAX8997_REG_BUCK3CTRL,
MAX8997_REG_BUCK3DVS,
MAX8997_REG_BUCK4CTRL,
MAX8997_REG_BUCK4DVS,
MAX8997_REG_BUCK5CTRL,
MAX8997_REG_BUCK5DVS1,
MAX8997_REG_BUCK5DVS2,
MAX8997_REG_BUCK5DVS3,
MAX8997_REG_BUCK5DVS4,
MAX8997_REG_BUCK5DVS5,
MAX8997_REG_BUCK5DVS6,
MAX8997_REG_BUCK5DVS7,
MAX8997_REG_BUCK5DVS8,
MAX8997_REG_BUCK6CTRL,
MAX8997_REG_BUCK6BPSKIPCTRL,
MAX8997_REG_BUCK7CTRL,
MAX8997_REG_BUCK7DVS,
MAX8997_REG_LDO1CTRL,
MAX8997_REG_LDO2CTRL,
MAX8997_REG_LDO3CTRL,
MAX8997_REG_LDO4CTRL,
MAX8997_REG_LDO5CTRL,
MAX8997_REG_LDO6CTRL,
MAX8997_REG_LDO7CTRL,
MAX8997_REG_LDO8CTRL,
MAX8997_REG_LDO9CTRL,
MAX8997_REG_LDO10CTRL,
MAX8997_REG_LDO11CTRL,
MAX8997_REG_LDO12CTRL,
MAX8997_REG_LDO13CTRL,
MAX8997_REG_LDO14CTRL,
MAX8997_REG_LDO15CTRL,
MAX8997_REG_LDO16CTRL,
MAX8997_REG_LDO17CTRL,
MAX8997_REG_LDO18CTRL,
MAX8997_REG_LDO21CTRL,
MAX8997_REG_MBCCTRL1,
MAX8997_REG_MBCCTRL2,
MAX8997_REG_MBCCTRL3,
MAX8997_REG_MBCCTRL4,
MAX8997_REG_MBCCTRL5,
MAX8997_REG_MBCCTRL6,
MAX8997_REG_OTPCGHCVS,
MAX8997_REG_SAFEOUTCTRL,
MAX8997_REG_LBCNFG1,
MAX8997_REG_LBCNFG2,
MAX8997_REG_BBCCTRL,
MAX8997_REG_FLASH1_CUR,
MAX8997_REG_FLASH2_CUR,
MAX8997_REG_MOVIE_CUR,
MAX8997_REG_GSMB_CUR,
MAX8997_REG_BOOST_CNTL,
MAX8997_REG_LEN_CNTL,
MAX8997_REG_FLASH_CNTL,
MAX8997_REG_WDT_CNTL,
MAX8997_REG_MAXFLASH1,
MAX8997_REG_MAXFLASH2,
MAX8997_REG_FLASHSTATUSMASK,
MAX8997_REG_GPIOCNTL1,
MAX8997_REG_GPIOCNTL2,
MAX8997_REG_GPIOCNTL3,
MAX8997_REG_GPIOCNTL4,
MAX8997_REG_GPIOCNTL5,
MAX8997_REG_GPIOCNTL6,
MAX8997_REG_GPIOCNTL7,
MAX8997_REG_GPIOCNTL8,
MAX8997_REG_GPIOCNTL9,
MAX8997_REG_GPIOCNTL10,
MAX8997_REG_GPIOCNTL11,
MAX8997_REG_GPIOCNTL12,
MAX8997_REG_LDO1CONFIG,
MAX8997_REG_LDO2CONFIG,
MAX8997_REG_LDO3CONFIG,
MAX8997_REG_LDO4CONFIG,
MAX8997_REG_LDO5CONFIG,
MAX8997_REG_LDO6CONFIG,
MAX8997_REG_LDO7CONFIG,
MAX8997_REG_LDO8CONFIG,
MAX8997_REG_LDO9CONFIG,
MAX8997_REG_LDO10CONFIG,
MAX8997_REG_LDO11CONFIG,
MAX8997_REG_LDO12CONFIG,
MAX8997_REG_LDO13CONFIG,
MAX8997_REG_LDO14CONFIG,
MAX8997_REG_LDO15CONFIG,
MAX8997_REG_LDO16CONFIG,
MAX8997_REG_LDO17CONFIG,
MAX8997_REG_LDO18CONFIG,
MAX8997_REG_LDO21CONFIG,
MAX8997_REG_DVSOKTIMER1,
MAX8997_REG_DVSOKTIMER2,
MAX8997_REG_DVSOKTIMER4,
MAX8997_REG_DVSOKTIMER5,
};
u8 max8997_dumpaddr_muic[] = {
MAX8997_MUIC_REG_INTMASK1,
MAX8997_MUIC_REG_INTMASK2,
MAX8997_MUIC_REG_INTMASK3,
MAX8997_MUIC_REG_CDETCTRL,
MAX8997_MUIC_REG_CONTROL1,
MAX8997_MUIC_REG_CONTROL2,
MAX8997_MUIC_REG_CONTROL3,
};
u8 max8997_dumpaddr_haptic[] = {
MAX8997_HAPTIC_REG_CONF1,
MAX8997_HAPTIC_REG_CONF2,
MAX8997_HAPTIC_REG_DRVCONF,
MAX8997_HAPTIC_REG_CYCLECONF1,
MAX8997_HAPTIC_REG_CYCLECONF2,
MAX8997_HAPTIC_REG_SIGCONF1,
MAX8997_HAPTIC_REG_SIGCONF2,
MAX8997_HAPTIC_REG_SIGCONF3,
MAX8997_HAPTIC_REG_SIGCONF4,
MAX8997_HAPTIC_REG_SIGDC1,
MAX8997_HAPTIC_REG_SIGDC2,
MAX8997_HAPTIC_REG_SIGPWMDC1,
MAX8997_HAPTIC_REG_SIGPWMDC2,
MAX8997_HAPTIC_REG_SIGPWMDC3,
MAX8997_HAPTIC_REG_SIGPWMDC4,
};
static int max8997_freeze(struct device *dev)
{
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
int i;
for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_pmic); i++)
max8997_read_reg(i2c, max8997_dumpaddr_pmic[i],
&max8997->reg_dump[i]);
for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_muic); i++)
max8997_read_reg(i2c, max8997_dumpaddr_muic[i],
&max8997->reg_dump[i + MAX8997_REG_PMIC_END]);
for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_haptic); i++)
max8997_read_reg(i2c, max8997_dumpaddr_haptic[i],
&max8997->reg_dump[i + MAX8997_REG_PMIC_END +
MAX8997_MUIC_REG_END]);
return 0;
}
static int max8997_restore(struct device *dev)
{
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
int i;
for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_pmic); i++)
max8997_write_reg(i2c, max8997_dumpaddr_pmic[i],
max8997->reg_dump[i]);
for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_muic); i++)
max8997_write_reg(i2c, max8997_dumpaddr_muic[i],
max8997->reg_dump[i + MAX8997_REG_PMIC_END]);
for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_haptic); i++)
max8997_write_reg(i2c, max8997_dumpaddr_haptic[i],
max8997->reg_dump[i + MAX8997_REG_PMIC_END +
MAX8997_MUIC_REG_END]);
return 0;
}
const struct dev_pm_ops max8997_pm = {
.freeze = max8997_freeze,
.restore = max8997_restore,
};
static struct i2c_driver max8997_i2c_driver = {
.driver = {
.name = "max8997",
.owner = THIS_MODULE,
.pm = &max8997_pm,
},
.probe = max8997_i2c_probe,
.remove = max8997_i2c_remove,
.id_table = max8997_i2c_id,
};
static int __init max8997_i2c_init(void)
{
return i2c_add_driver(&max8997_i2c_driver);
}
/* init early so consumer devices can complete system boot */
subsys_initcall(max8997_i2c_init);
static void __exit max8997_i2c_exit(void)
{
i2c_del_driver(&max8997_i2c_driver);
}
module_exit(max8997_i2c_exit);
MODULE_DESCRIPTION("MAXIM 8997 multi-function core driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Timesys/linux-timesys | arch/arm/mach-exynos4/hotplug.c | 2002 | 2639 | /* linux arch/arm/mach-exynos4/hotplug.c
*
* Cloned from linux/arch/arm/mach-realview/hotplug.c
*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
extern volatile int pen_release;
static inline void cpu_enter_lowpower(void)
{
unsigned int v;
flush_cache_all();
asm volatile(
" mcr p15, 0, %1, c7, c5, 0\n"
" mcr p15, 0, %1, c7, c10, 4\n"
/*
* Turn off coherency
*/
" mrc p15, 0, %0, c1, c0, 1\n"
" bic %0, %0, %3\n"
" mcr p15, 0, %0, c1, c0, 1\n"
" mrc p15, 0, %0, c1, c0, 0\n"
" bic %0, %0, %2\n"
" mcr p15, 0, %0, c1, c0, 0\n"
: "=&r" (v)
: "r" (0), "Ir" (CR_C), "Ir" (0x40)
: "cc");
}
static inline void cpu_leave_lowpower(void)
{
unsigned int v;
asm volatile(
"mrc p15, 0, %0, c1, c0, 0\n"
" orr %0, %0, %1\n"
" mcr p15, 0, %0, c1, c0, 0\n"
" mrc p15, 0, %0, c1, c0, 1\n"
" orr %0, %0, %2\n"
" mcr p15, 0, %0, c1, c0, 1\n"
: "=&r" (v)
: "Ir" (CR_C), "Ir" (0x40)
: "cc");
}
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
/*
* there is no power-control hardware on this platform, so all
* we can do is put the core into WFI; this is safe as the calling
* code will have already disabled interrupts
*/
for (;;) {
/*
* here's the WFI
*/
asm(".word 0xe320f003\n"
:
:
: "memory", "cc");
if (pen_release == cpu) {
/*
* OK, proper wakeup, we're done
*/
break;
}
/*
* Getting here means that we have come out of WFI without
* having been woken up - this shouldn't happen
*
* Just note it happening - when we're woken, we can report
* its occurrence.
*/
(*spurious)++;
}
}
int platform_cpu_kill(unsigned int cpu)
{
return 1;
}
/*
* platform-specific code to shutdown a CPU
*
* Called with IRQs disabled
*/
void platform_cpu_die(unsigned int cpu)
{
int spurious = 0;
/*
* we're ready for shutdown now, so do it
*/
cpu_enter_lowpower();
platform_do_lowpower(cpu, &spurious);
/*
* bring this CPU back into the world of cache
* coherency, and then restore interrupts
*/
cpu_leave_lowpower();
if (spurious)
pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
int platform_cpu_disable(unsigned int cpu)
{
/*
* we don't allow CPU 0 to be shut down (it is still too special,
* e.g. it handles clock tick interrupts)
*/
return cpu == 0 ? -EPERM : 0;
}
| gpl-2.0 |
chase2534/gtab47.freekern | drivers/media/dvb-core/dvbdev.c | 2258 | 11790 | /*
* dvbdev.c
*
* Copyright (C) 2000 Ralph Metzler <ralph@convergence.de>
* & Marcus Metzler <marcus@convergence.de>
* for convergence integrated media GmbH
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include "dvbdev.h"
static DEFINE_MUTEX(dvbdev_mutex);
static int dvbdev_debug;
module_param(dvbdev_debug, int, 0644);
MODULE_PARM_DESC(dvbdev_debug, "Turn on/off device debugging (default:off).");
#define dprintk if (dvbdev_debug) printk
static LIST_HEAD(dvb_adapter_list);
static DEFINE_MUTEX(dvbdev_register_lock);
static const char * const dnames[] = {
"video", "audio", "sec", "frontend", "demux", "dvr", "ca",
"net", "osd"
};
#ifdef CONFIG_DVB_DYNAMIC_MINORS
#define MAX_DVB_MINORS 256
#define DVB_MAX_IDS MAX_DVB_MINORS
#else
#define DVB_MAX_IDS 4
#define nums2minor(num,type,id) ((num << 6) | (id << 4) | type)
#define MAX_DVB_MINORS (DVB_MAX_ADAPTERS*64)
#endif
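/*
* Editor's illustrative sketch (hedged, not part of this file): with
* CONFIG_DVB_DYNAMIC_MINORS disabled, nums2minor() packs the adapter
* number into bits 6 and up, the device id into bits 4-5 and the device
* type into bits 0-3. The values below are made up for illustration.
*/
#if 0
static void example_minor_layout(void)
{
/* adapter 1, frontend 0 -> (1 << 6) | (0 << 4) | 3 = 67 */
int minor = nums2minor(1, DVB_DEVICE_FRONTEND, 0);
printk(KERN_DEBUG "example minor: %d\n", minor);
}
#endif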
static struct class *dvb_class;
static struct dvb_device *dvb_minors[MAX_DVB_MINORS];
static DECLARE_RWSEM(minor_rwsem);
static int dvb_device_open(struct inode *inode, struct file *file)
{
struct dvb_device *dvbdev;
mutex_lock(&dvbdev_mutex);
down_read(&minor_rwsem);
dvbdev = dvb_minors[iminor(inode)];
if (dvbdev && dvbdev->fops) {
int err = 0;
const struct file_operations *old_fops;
file->private_data = dvbdev;
old_fops = file->f_op;
file->f_op = fops_get(dvbdev->fops);
if (file->f_op == NULL) {
file->f_op = old_fops;
goto fail;
}
if(file->f_op->open)
err = file->f_op->open(inode,file);
if (err) {
fops_put(file->f_op);
file->f_op = fops_get(old_fops);
}
fops_put(old_fops);
up_read(&minor_rwsem);
mutex_unlock(&dvbdev_mutex);
return err;
}
fail:
up_read(&minor_rwsem);
mutex_unlock(&dvbdev_mutex);
return -ENODEV;
}
static const struct file_operations dvb_device_fops =
{
.owner = THIS_MODULE,
.open = dvb_device_open,
.llseek = noop_llseek,
};
static struct cdev dvb_device_cdev;
int dvb_generic_open(struct inode *inode, struct file *file)
{
struct dvb_device *dvbdev = file->private_data;
if (!dvbdev)
return -ENODEV;
if (!dvbdev->users)
return -EBUSY;
if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
if (!dvbdev->readers)
return -EBUSY;
dvbdev->readers--;
} else {
if (!dvbdev->writers)
return -EBUSY;
dvbdev->writers--;
}
dvbdev->users--;
return 0;
}
EXPORT_SYMBOL(dvb_generic_open);
int dvb_generic_release(struct inode *inode, struct file *file)
{
struct dvb_device *dvbdev = file->private_data;
if (!dvbdev)
return -ENODEV;
if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
dvbdev->readers++;
} else {
dvbdev->writers++;
}
dvbdev->users++;
return 0;
}
EXPORT_SYMBOL(dvb_generic_release);
long dvb_generic_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct dvb_device *dvbdev = file->private_data;
if (!dvbdev)
return -ENODEV;
if (!dvbdev->kernel_ioctl)
return -EINVAL;
return dvb_usercopy(file, cmd, arg, dvbdev->kernel_ioctl);
}
EXPORT_SYMBOL(dvb_generic_ioctl);
static int dvbdev_get_free_id (struct dvb_adapter *adap, int type)
{
u32 id = 0;
while (id < DVB_MAX_IDS) {
struct dvb_device *dev;
list_for_each_entry(dev, &adap->device_list, list_head)
if (dev->type == type && dev->id == id)
goto skip;
return id;
skip:
id++;
}
return -ENFILE;
}
int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
const struct dvb_device *template, void *priv, int type)
{
struct dvb_device *dvbdev;
struct file_operations *dvbdevfops;
struct device *clsdev;
int minor;
int id;
mutex_lock(&dvbdev_register_lock);
if ((id = dvbdev_get_free_id (adap, type)) < 0){
mutex_unlock(&dvbdev_register_lock);
*pdvbdev = NULL;
printk(KERN_ERR "%s: couldn't find free device id\n", __func__);
return -ENFILE;
}
*pdvbdev = dvbdev = kmalloc(sizeof(struct dvb_device), GFP_KERNEL);
if (!dvbdev){
mutex_unlock(&dvbdev_register_lock);
return -ENOMEM;
}
dvbdevfops = kzalloc(sizeof(struct file_operations), GFP_KERNEL);
if (!dvbdevfops){
kfree (dvbdev);
mutex_unlock(&dvbdev_register_lock);
return -ENOMEM;
}
memcpy(dvbdev, template, sizeof(struct dvb_device));
dvbdev->type = type;
dvbdev->id = id;
dvbdev->adapter = adap;
dvbdev->priv = priv;
dvbdev->fops = dvbdevfops;
init_waitqueue_head (&dvbdev->wait_queue);
memcpy(dvbdevfops, template->fops, sizeof(struct file_operations));
dvbdevfops->owner = adap->module;
list_add_tail (&dvbdev->list_head, &adap->device_list);
down_write(&minor_rwsem);
#ifdef CONFIG_DVB_DYNAMIC_MINORS
for (minor = 0; minor < MAX_DVB_MINORS; minor++)
if (dvb_minors[minor] == NULL)
break;
if (minor == MAX_DVB_MINORS) {
kfree(dvbdevfops);
kfree(dvbdev);
up_write(&minor_rwsem);
mutex_unlock(&dvbdev_register_lock);
return -EINVAL;
}
#else
minor = nums2minor(adap->num, type, id);
#endif
dvbdev->minor = minor;
dvb_minors[minor] = dvbdev;
up_write(&minor_rwsem);
mutex_unlock(&dvbdev_register_lock);
clsdev = device_create(dvb_class, adap->device,
MKDEV(DVB_MAJOR, minor),
dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
if (IS_ERR(clsdev)) {
printk(KERN_ERR "%s: failed to create device dvb%d.%s%d (%ld)\n",
__func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
return PTR_ERR(clsdev);
}
dprintk(KERN_DEBUG "DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
adap->num, dnames[type], id, minor, minor);
return 0;
}
EXPORT_SYMBOL(dvb_register_device);
void dvb_unregister_device(struct dvb_device *dvbdev)
{
if (!dvbdev)
return;
down_write(&minor_rwsem);
dvb_minors[dvbdev->minor] = NULL;
up_write(&minor_rwsem);
device_destroy(dvb_class, MKDEV(DVB_MAJOR, dvbdev->minor));
list_del (&dvbdev->list_head);
kfree (dvbdev->fops);
kfree (dvbdev);
}
EXPORT_SYMBOL(dvb_unregister_device);
static int dvbdev_check_free_adapter_num(int num)
{
struct list_head *entry;
list_for_each(entry, &dvb_adapter_list) {
struct dvb_adapter *adap;
adap = list_entry(entry, struct dvb_adapter, list_head);
if (adap->num == num)
return 0;
}
return 1;
}
static int dvbdev_get_free_adapter_num (void)
{
int num = 0;
while (num < DVB_MAX_ADAPTERS) {
if (dvbdev_check_free_adapter_num(num))
return num;
num++;
}
return -ENFILE;
}
int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
struct module *module, struct device *device,
short *adapter_nums)
{
int i, num;
mutex_lock(&dvbdev_register_lock);
for (i = 0; i < DVB_MAX_ADAPTERS; ++i) {
num = adapter_nums[i];
if (num >= 0 && num < DVB_MAX_ADAPTERS) {
/* use the one the driver asked for */
if (dvbdev_check_free_adapter_num(num))
break;
} else {
num = dvbdev_get_free_adapter_num();
break;
}
num = -1;
}
if (num < 0) {
mutex_unlock(&dvbdev_register_lock);
return -ENFILE;
}
memset (adap, 0, sizeof(struct dvb_adapter));
INIT_LIST_HEAD (&adap->device_list);
printk(KERN_INFO "DVB: registering new adapter (%s)\n", name);
adap->num = num;
adap->name = name;
adap->module = module;
adap->device = device;
adap->mfe_shared = 0;
adap->mfe_dvbdev = NULL;
mutex_init (&adap->mfe_lock);
list_add_tail (&adap->list_head, &dvb_adapter_list);
mutex_unlock(&dvbdev_register_lock);
return num;
}
EXPORT_SYMBOL(dvb_register_adapter);
int dvb_unregister_adapter(struct dvb_adapter *adap)
{
mutex_lock(&dvbdev_register_lock);
list_del (&adap->list_head);
mutex_unlock(&dvbdev_register_lock);
return 0;
}
EXPORT_SYMBOL(dvb_unregister_adapter);
/* if the miracle happens and "generic_usercopy()" is included into
the kernel, then this can vanish. please don't make the mistake and
define this as video_usercopy(). this will introduce a dependency
to the v4l "videodev.o" module, which is unnecessary for some
cards (ie. the budget dvb-cards don't need the v4l module...) */
int dvb_usercopy(struct file *file,
unsigned int cmd, unsigned long arg,
int (*func)(struct file *file,
unsigned int cmd, void *arg))
{
char sbuf[128];
void *mbuf = NULL;
void *parg = NULL;
int err = -EINVAL;
/* Copy arguments into temp kernel buffer */
switch (_IOC_DIR(cmd)) {
case _IOC_NONE:
/*
* For this command, the pointer is actually an integer
* argument.
*/
parg = (void *) arg;
break;
case _IOC_READ: /* some v4l ioctls are marked wrong ... */
case _IOC_WRITE:
case (_IOC_WRITE | _IOC_READ):
if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
parg = sbuf;
} else {
/* too big to allocate from stack */
mbuf = kmalloc(_IOC_SIZE(cmd),GFP_KERNEL);
if (NULL == mbuf)
return -ENOMEM;
parg = mbuf;
}
err = -EFAULT;
if (copy_from_user(parg, (void __user *)arg, _IOC_SIZE(cmd)))
goto out;
break;
}
/* call driver */
if ((err = func(file, cmd, parg)) == -ENOIOCTLCMD)
err = -ENOTTY;
if (err < 0)
goto out;
/* Copy results into user buffer */
switch (_IOC_DIR(cmd))
{
case _IOC_READ:
case (_IOC_WRITE | _IOC_READ):
if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
err = -EFAULT;
break;
}
out:
kfree(mbuf);
return err;
}
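/*
 * Usage sketch (illustrative only, not part of this file): a DVB device's
 * unlocked_ioctl handler usually funnels everything through dvb_usercopy()
 * so the copy-in/copy-out of the ioctl argument is handled in one place.
 * The handler names and the ioctl command below are hypothetical.
 */
#if 0
static int example_do_ioctl(struct file *file, unsigned int cmd, void *parg)
{
	switch (cmd) {
	case _IOR('o', 0x80, int):		/* hypothetical command */
		*(int *)parg = 42;		/* copied back by dvb_usercopy() */
		return 0;
	default:
		return -ENOTTY;
	}
}
static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return dvb_usercopy(file, cmd, arg, example_do_ioctl);
}
#endif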
static int dvb_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct dvb_device *dvbdev = dev_get_drvdata(dev);
add_uevent_var(env, "DVB_ADAPTER_NUM=%d", dvbdev->adapter->num);
add_uevent_var(env, "DVB_DEVICE_TYPE=%s", dnames[dvbdev->type]);
add_uevent_var(env, "DVB_DEVICE_NUM=%d", dvbdev->id);
return 0;
}
static char *dvb_devnode(struct device *dev, umode_t *mode)
{
struct dvb_device *dvbdev = dev_get_drvdata(dev);
return kasprintf(GFP_KERNEL, "dvb/adapter%d/%s%d",
dvbdev->adapter->num, dnames[dvbdev->type], dvbdev->id);
}
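/*
 * For example, the first frontend device of adapter 0 is named
 * "dvb/adapter0/frontend0" here, so udev/devtmpfs create it as
 * /dev/dvb/adapter0/frontend0.
 */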
static int __init init_dvbdev(void)
{
int retval;
dev_t dev = MKDEV(DVB_MAJOR, 0);
if ((retval = register_chrdev_region(dev, MAX_DVB_MINORS, "DVB")) != 0) {
printk(KERN_ERR "dvb-core: unable to get major %d\n", DVB_MAJOR);
return retval;
}
cdev_init(&dvb_device_cdev, &dvb_device_fops);
if ((retval = cdev_add(&dvb_device_cdev, dev, MAX_DVB_MINORS)) != 0) {
printk(KERN_ERR "dvb-core: unable register character device\n");
goto error;
}
dvb_class = class_create(THIS_MODULE, "dvb");
if (IS_ERR(dvb_class)) {
retval = PTR_ERR(dvb_class);
goto error;
}
dvb_class->dev_uevent = dvb_uevent;
dvb_class->devnode = dvb_devnode;
return 0;
error:
cdev_del(&dvb_device_cdev);
unregister_chrdev_region(dev, MAX_DVB_MINORS);
return retval;
}
static void __exit exit_dvbdev(void)
{
class_destroy(dvb_class);
cdev_del(&dvb_device_cdev);
unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS);
}
subsys_initcall(init_dvbdev);
module_exit(exit_dvbdev);
MODULE_DESCRIPTION("DVB Core Driver");
MODULE_AUTHOR("Marcus Metzler, Ralph Metzler, Holger Waechtler");
MODULE_LICENSE("GPL");
| gpl-2.0 |
prashmohan/lxc-fork | drivers/usb/wusbcore/wa-rpipe.c | 2514 | 15415 | /*
* WUSB Wire Adapter
* rpipe management
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* FIXME: docs
*
* RPIPE
*
* Targeted at different downstream endpoints
*
* Descriptor: used to configure the remote pipe.
*
* The number of blocks could be dynamic (wBlocks in descriptor is
* 0)--need to schedule them then.
*
* Each bit in wa->rpipe_bm represents if an rpipe is being used or
* not. Rpipes are represented with a 'struct wa_rpipe' that is
* attached to the hcpriv member of a 'struct usb_host_endpoint'.
*
* When you need to xfer data to an endpoint, you get an rpipe for it
* with rpipe_get_by_ep(), which gives you a reference to the rpipe
* and keeps a single one (the first one) with the endpoint. When you
* are done transferring, you drop that reference. At the end the
* rpipe is always allocated and bound to the endpoint. There it might
* be recycled when not used.
*
* Addresses:
*
* We use a 1:1 mapping mechanism between port address (0 based
* index, actually) and the address. The USB stack knows about this.
*
* USB Stack port number 4 (1 based)
* WUSB code port index 3 (0 based)
* USB Address 5 (2 based -- 0 is for default, 1 for root hub)
*
* Now, because we don't use the concept of a default address exactly
* like the (wired) USB code does, we need to kind of skip it. So we
* never take addresses from the urb->pipe, but from the
* urb->dev->devnum, to make sure that we always have the right
* destination address.
*/
#include <linux/init.h>
#include <asm/atomic.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include "wusbhc.h"
#include "wa-hc.h"
static int __rpipe_get_descr(struct wahc *wa,
struct usb_rpipe_descriptor *descr, u16 index)
{
ssize_t result;
struct device *dev = &wa->usb_iface->dev;
/* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor()
* function because the arguments are different.
*/
result = usb_control_msg(
wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
USB_REQ_GET_DESCRIPTOR,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,
USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
1000 /* FIXME: arbitrary */);
if (result < 0) {
dev_err(dev, "rpipe %u: get descriptor failed: %d\n",
index, (int)result);
goto error;
}
if (result < sizeof(*descr)) {
dev_err(dev, "rpipe %u: got short descriptor "
"(%zd vs %zd bytes needed)\n",
index, result, sizeof(*descr));
result = -EINVAL;
goto error;
}
result = 0;
error:
return result;
}
/*
*
* The descriptor is assumed to be properly initialized (ie: you got
* it through __rpipe_get_descr()).
*/
static int __rpipe_set_descr(struct wahc *wa,
struct usb_rpipe_descriptor *descr, u16 index)
{
ssize_t result;
struct device *dev = &wa->usb_iface->dev;
/* we cannot use the usb_get_descriptor() function because the
* arguments are different.
*/
result = usb_control_msg(
wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_SET_DESCRIPTOR,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
HZ / 10);
if (result < 0) {
dev_err(dev, "rpipe %u: set descriptor failed: %d\n",
index, (int)result);
goto error;
}
if (result < sizeof(*descr)) {
dev_err(dev, "rpipe %u: sent short descriptor "
"(%zd vs %zd bytes required)\n",
index, result, sizeof(*descr));
result = -EINVAL;
goto error;
}
result = 0;
error:
return result;
}
static void rpipe_init(struct wa_rpipe *rpipe)
{
kref_init(&rpipe->refcnt);
spin_lock_init(&rpipe->seg_lock);
INIT_LIST_HEAD(&rpipe->seg_list);
}
static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)
{
unsigned long flags;
spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);
if (rpipe_idx < wa->rpipes)
set_bit(rpipe_idx, wa->rpipe_bm);
spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
return rpipe_idx;
}
static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)
{
unsigned long flags;
spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
clear_bit(rpipe_idx, wa->rpipe_bm);
spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
}
void rpipe_destroy(struct kref *_rpipe)
{
struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt);
u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
if (rpipe->ep)
rpipe->ep->hcpriv = NULL;
rpipe_put_idx(rpipe->wa, index);
wa_put(rpipe->wa);
kfree(rpipe);
}
EXPORT_SYMBOL_GPL(rpipe_destroy);
/*
* Locate an idle rpipe, create a structure for it and return it
*
* @wa is referenced and unlocked
* @crs enum rpipe_attr, required endpoint characteristics
*
* The rpipe can be used only sequentially (not in parallel).
*
* The rpipe is moved into the "ready" state.
*/
static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs,
gfp_t gfp)
{
int result;
unsigned rpipe_idx;
struct wa_rpipe *rpipe;
struct device *dev = &wa->usb_iface->dev;
rpipe = kzalloc(sizeof(*rpipe), gfp);
if (rpipe == NULL)
return -ENOMEM;
rpipe_init(rpipe);
/* Look for an idle pipe */
for (rpipe_idx = 0; rpipe_idx < wa->rpipes; rpipe_idx++) {
rpipe_idx = rpipe_get_idx(wa, rpipe_idx);
if (rpipe_idx >= wa->rpipes) /* no more pipes :( */
break;
result = __rpipe_get_descr(wa, &rpipe->descr, rpipe_idx);
if (result < 0)
dev_err(dev, "Can't get descriptor for rpipe %u: %d\n",
rpipe_idx, result);
else if ((rpipe->descr.bmCharacteristics & crs) != 0)
goto found;
rpipe_put_idx(wa, rpipe_idx);
}
*prpipe = NULL;
kfree(rpipe);
return -ENXIO;
found:
set_bit(rpipe_idx, wa->rpipe_bm);
rpipe->wa = wa_get(wa);
*prpipe = rpipe;
return 0;
}
static int __rpipe_reset(struct wahc *wa, unsigned index)
{
int result;
struct device *dev = &wa->usb_iface->dev;
result = usb_control_msg(
wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_RPIPE_RESET,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
if (result < 0)
dev_err(dev, "rpipe %u: reset failed: %d\n",
index, result);
return result;
}
/*
* Fake companion descriptor for ep0
*
* See WUSB1.0[7.4.4], most of this is zero for bulk/int/ctl
*/
static struct usb_wireless_ep_comp_descriptor epc0 = {
.bLength = sizeof(epc0),
.bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP,
/* .bMaxBurst = 1, */
.bMaxSequence = 31,
};
/*
* Look for EP companion descriptor
*
* Get there, look for Inara in the endpoint's extra descriptors
*/
static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(
struct device *dev, struct usb_host_endpoint *ep)
{
void *itr;
size_t itr_size;
struct usb_descriptor_header *hdr;
struct usb_wireless_ep_comp_descriptor *epcd;
if (ep->desc.bEndpointAddress == 0) {
epcd = &epc0;
goto out;
}
itr = ep->extra;
itr_size = ep->extralen;
epcd = NULL;
while (itr_size > 0) {
if (itr_size < sizeof(*hdr)) {
dev_err(dev, "HW Bug? ep 0x%02x: extra descriptors "
"at offset %zu: only %zu bytes left\n",
ep->desc.bEndpointAddress,
itr - (void *) ep->extra, itr_size);
break;
}
hdr = itr;
if (hdr->bDescriptorType == USB_DT_WIRELESS_ENDPOINT_COMP) {
epcd = itr;
break;
}
if (hdr->bLength > itr_size) {
dev_err(dev, "HW Bug? ep 0x%02x: extra descriptor "
"at offset %zu (type 0x%02x) "
"length %d but only %zu bytes left\n",
ep->desc.bEndpointAddress,
itr - (void *) ep->extra, hdr->bDescriptorType,
hdr->bLength, itr_size);
break;
}
itr += hdr->bLength;
itr_size -= hdr->bLength;
}
out:
return epcd;
}
/*
* Aim an rpipe to its device & endpoint destination
*
* Make sure we change the address to unauthenticated if the device
* is WUSB and it is not authenticated.
*/
static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp)
{
int result = -ENOMSG; /* better code for lack of companion? */
struct device *dev = &wa->usb_iface->dev;
struct usb_device *usb_dev = urb->dev;
struct usb_wireless_ep_comp_descriptor *epcd;
u8 unauth;
epcd = rpipe_epc_find(dev, ep);
if (epcd == NULL) {
dev_err(dev, "ep 0x%02x: can't find companion descriptor\n",
ep->desc.bEndpointAddress);
goto error;
}
unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0;
__rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex));
atomic_set(&rpipe->segs_available, le16_to_cpu(rpipe->descr.wRequests));
/* FIXME: block allocation system; request with queuing and timeout */
/* FIXME: compute so seg_size > ep->maxpktsize */
rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
/* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize);
rpipe->descr.bHSHubAddress = 0; /* reserved: zero */
rpipe->descr.bHSHubPort = wusb_port_no_to_idx(urb->dev->portnum);
/* FIXME: use maximum speed as supported or recommended by device */
rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ?
UWB_PHY_RATE_53 : UWB_PHY_RATE_200;
dev_dbg(dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n",
urb->dev->devnum, urb->dev->devnum | unauth,
le16_to_cpu(rpipe->descr.wRPipeIndex),
usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed);
/* see security.c:wusb_update_address() */
if (unlikely(urb->dev->devnum == 0x80))
rpipe->descr.bDeviceAddress = 0;
else
rpipe->descr.bDeviceAddress = urb->dev->devnum | unauth;
rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress;
/* FIXME: bDataSequence */
rpipe->descr.bDataSequence = 0;
/* FIXME: dwCurrentWindow */
rpipe->descr.dwCurrentWindow = cpu_to_le32(1);
/* FIXME: bMaxDataSequence */
rpipe->descr.bMaxDataSequence = epcd->bMaxSequence - 1;
rpipe->descr.bInterval = ep->desc.bInterval;
/* FIXME: bOverTheAirInterval */
rpipe->descr.bOverTheAirInterval = 0; /* 0 if not isoc */
/* FIXME: xmit power & preamble blah blah */
rpipe->descr.bmAttribute = ep->desc.bmAttributes & 0x03;
/* rpipe->descr.bmCharacteristics RO */
/* FIXME: bmRetryOptions */
rpipe->descr.bmRetryOptions = 15;
/* FIXME: use for assessing link quality? */
rpipe->descr.wNumTransactionErrors = 0;
result = __rpipe_set_descr(wa, &rpipe->descr,
le16_to_cpu(rpipe->descr.wRPipeIndex));
if (result < 0) {
dev_err(dev, "Cannot aim rpipe: %d\n", result);
goto error;
}
result = 0;
error:
return result;
}
/*
* Check an aimed rpipe to make sure it points to where we want
*
* We use bit 19 of the Linux USB pipe bitmap for unauth vs auth
* space; when it is like that, we OR in 0x80 to make an unauth address.
*/
static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
const struct usb_host_endpoint *ep,
const struct urb *urb, gfp_t gfp)
{
int result = 0; /* better code for lack of companion? */
struct device *dev = &wa->usb_iface->dev;
struct usb_device *usb_dev = urb->dev;
u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 0x80 : 0;
u8 portnum = wusb_port_no_to_idx(urb->dev->portnum);
#define AIM_CHECK(rdf, val, text) \
do { \
if (rpipe->descr.rdf != (val)) { \
dev_err(dev, \
"rpipe aim discrepancy: " #rdf " " text "\n", \
rpipe->descr.rdf, (val)); \
result = -EINVAL; \
WARN_ON(1); \
} \
} while (0)
AIM_CHECK(wMaxPacketSize, cpu_to_le16(ep->desc.wMaxPacketSize),
"(%u vs %u)");
AIM_CHECK(bHSHubPort, portnum, "(%u vs %u)");
AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ?
UWB_PHY_RATE_53 : UWB_PHY_RATE_200,
"(%u vs %u)");
AIM_CHECK(bDeviceAddress, urb->dev->devnum | unauth, "(%u vs %u)");
AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)");
AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)");
AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)");
#undef AIM_CHECK
return result;
}
#ifndef CONFIG_BUG
#define CONFIG_BUG 0
#endif
/*
* Make sure there is an rpipe allocated for an endpoint
*
* If already allocated, we just refcount it; if not, we get an
* idle one, aim it to the right location and take it.
*
* Attaches to ep->hcpriv and rpipe->ep to ep.
*/
int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep,
struct urb *urb, gfp_t gfp)
{
int result = 0;
struct device *dev = &wa->usb_iface->dev;
struct wa_rpipe *rpipe;
u8 eptype;
mutex_lock(&wa->rpipe_mutex);
rpipe = ep->hcpriv;
if (rpipe != NULL) {
if (CONFIG_BUG == 1) {
result = rpipe_check_aim(rpipe, wa, ep, urb, gfp);
if (result < 0)
goto error;
}
__rpipe_get(rpipe);
dev_dbg(dev, "ep 0x%02x: reusing rpipe %u\n",
ep->desc.bEndpointAddress,
le16_to_cpu(rpipe->descr.wRPipeIndex));
} else {
/* hmm, assign idle rpipe, aim it */
result = -ENOBUFS;
eptype = ep->desc.bmAttributes & 0x03;
result = rpipe_get_idle(&rpipe, wa, 1 << eptype, gfp);
if (result < 0)
goto error;
result = rpipe_aim(rpipe, wa, ep, urb, gfp);
if (result < 0) {
rpipe_put(rpipe);
goto error;
}
ep->hcpriv = rpipe;
rpipe->ep = ep;
__rpipe_get(rpipe); /* for caching into ep->hcpriv */
dev_dbg(dev, "ep 0x%02x: using rpipe %u\n",
ep->desc.bEndpointAddress,
le16_to_cpu(rpipe->descr.wRPipeIndex));
}
error:
mutex_unlock(&wa->rpipe_mutex);
return result;
}
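/*
 * Usage sketch (illustrative only, not part of this file): the transfer
 * submission code conceptually wraps each URB like this -- take (or create)
 * the endpoint's rpipe before queueing segments and drop the reference when
 * done.  example_submit() is a hypothetical name.
 */
#if 0
static int example_submit(struct wahc *wa, struct usb_host_endpoint *ep,
			  struct urb *urb, gfp_t gfp)
{
	struct wa_rpipe *rpipe;
	int result;
	result = rpipe_get_by_ep(wa, ep, urb, gfp); /* refcounts ep->hcpriv */
	if (result < 0)
		return result;
	rpipe = ep->hcpriv;
	/* ... queue the urb's segments against rpipe ... */
	rpipe_put(rpipe);			/* drop our reference when done */
	return 0;
}
#endif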
/*
* Allocate the bitmap for each rpipe.
*/
int wa_rpipes_create(struct wahc *wa)
{
wa->rpipes = wa->wa_descr->wNumRPipes;
wa->rpipe_bm = kzalloc(BITS_TO_LONGS(wa->rpipes)*sizeof(unsigned long),
GFP_KERNEL);
if (wa->rpipe_bm == NULL)
return -ENOMEM;
return 0;
}
void wa_rpipes_destroy(struct wahc *wa)
{
struct device *dev = &wa->usb_iface->dev;
if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) {
char buf[256];
WARN_ON(1);
bitmap_scnprintf(buf, sizeof(buf), wa->rpipe_bm, wa->rpipes);
dev_err(dev, "BUG: pipes not released on exit: %s\n", buf);
}
kfree(wa->rpipe_bm);
}
/*
* Release resources allocated for an endpoint
*
* If there is an associated rpipe to this endpoint, abort any pending
* transfers and put it. If the rpipe ends up being destroyed,
* __rpipe_destroy() will cleanup ep->hcpriv.
*
* This is called before calling hcd->stop(), so you don't need to do
* anything else in there.
*/
void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
{
struct wa_rpipe *rpipe;
mutex_lock(&wa->rpipe_mutex);
rpipe = ep->hcpriv;
if (rpipe != NULL) {
u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
usb_control_msg(
wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
USB_REQ_RPIPE_ABORT,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
rpipe_put(rpipe);
}
mutex_unlock(&wa->rpipe_mutex);
}
EXPORT_SYMBOL_GPL(rpipe_ep_disable);
| gpl-2.0 |
n3ocort3x/endeavoru_3.14 | drivers/net/arm/etherh.c | 2770 | 20541 | /*
* linux/drivers/acorn/net/etherh.c
*
* Copyright (C) 2000-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* NS8390 I-cubed EtherH and ANT EtherM specific driver
* Thanks to I-Cubed for information on their cards.
* EtherM conversion (C) 1999 Chris Kemp and Tim Watterton
* EtherM integration (C) 2000 Aleph One Ltd (Tak-Shing Chan)
* EtherM integration re-engineered by Russell King.
*
* Changelog:
* 08-12-1996 RMK 1.00 Created
* RMK 1.03 Added support for EtherLan500 cards
* 23-11-1997 RMK 1.04 Added media autodetection
* 16-04-1998 RMK 1.05 Improved media autodetection
* 10-02-2000 RMK 1.06 Updated for 2.3.43
* 13-05-2000 RMK 1.07 Updated for 2.3.99-pre8
* 12-10-1999 CK/TEW EtherM driver first release
* 21-12-2000 TTC EtherH/EtherM integration
* 25-12-2000 RMK 1.08 Clean integration of EtherM into this driver.
* 03-01-2002 RMK 1.09 Always enable IRQs if we're in the nic slot.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <asm/system.h>
#include <asm/ecard.h>
#include <asm/io.h>
#define EI_SHIFT(x) (ei_local->reg_offset[x])
#define ei_inb(_p) readb((void __iomem *)_p)
#define ei_outb(_v,_p) writeb(_v,(void __iomem *)_p)
#define ei_inb_p(_p) readb((void __iomem *)_p)
#define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p)
#define NET_DEBUG 0
#define DEBUG_INIT 2
#define DRV_NAME "etherh"
#define DRV_VERSION "1.11"
static char version[] __initdata =
"EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
#include "../lib8390.c"
static unsigned int net_debug = NET_DEBUG;
struct etherh_priv {
void __iomem *ioc_fast;
void __iomem *memc;
void __iomem *dma_base;
unsigned int id;
void __iomem *ctrl_port;
unsigned char ctrl;
u32 supported;
};
struct etherh_data {
unsigned long ns8390_offset;
unsigned long dataport_offset;
unsigned long ctrlport_offset;
int ctrl_ioc;
const char name[16];
u32 supported;
unsigned char tx_start_page;
unsigned char stop_page;
};
MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("EtherH/EtherM driver");
MODULE_LICENSE("GPL");
#define ETHERH500_DATAPORT 0x800 /* MEMC */
#define ETHERH500_NS8390 0x000 /* MEMC */
#define ETHERH500_CTRLPORT 0x800 /* IOC */
#define ETHERH600_DATAPORT 0x040 /* MEMC */
#define ETHERH600_NS8390 0x800 /* MEMC */
#define ETHERH600_CTRLPORT 0x200 /* MEMC */
#define ETHERH_CP_IE 1
#define ETHERH_CP_IF 2
#define ETHERH_CP_HEARTBEAT 2
#define ETHERH_TX_START_PAGE 1
#define ETHERH_STOP_PAGE 127
/*
* These came from CK/TEW
*/
#define ETHERM_DATAPORT 0x200 /* MEMC */
#define ETHERM_NS8390 0x800 /* MEMC */
#define ETHERM_CTRLPORT 0x23c /* MEMC */
#define ETHERM_TX_START_PAGE 64
#define ETHERM_STOP_PAGE 127
/* ------------------------------------------------------------------------ */
#define etherh_priv(dev) \
((struct etherh_priv *)(((char *)netdev_priv(dev)) + sizeof(struct ei_device)))
static inline void etherh_set_ctrl(struct etherh_priv *eh, unsigned char mask)
{
unsigned char ctrl = eh->ctrl | mask;
eh->ctrl = ctrl;
writeb(ctrl, eh->ctrl_port);
}
static inline void etherh_clr_ctrl(struct etherh_priv *eh, unsigned char mask)
{
unsigned char ctrl = eh->ctrl & ~mask;
eh->ctrl = ctrl;
writeb(ctrl, eh->ctrl_port);
}
static inline unsigned int etherh_get_stat(struct etherh_priv *eh)
{
return readb(eh->ctrl_port);
}
static void etherh_irq_enable(ecard_t *ec, int irqnr)
{
struct etherh_priv *eh = ec->irq_data;
etherh_set_ctrl(eh, ETHERH_CP_IE);
}
static void etherh_irq_disable(ecard_t *ec, int irqnr)
{
struct etherh_priv *eh = ec->irq_data;
etherh_clr_ctrl(eh, ETHERH_CP_IE);
}
static expansioncard_ops_t etherh_ops = {
.irqenable = etherh_irq_enable,
.irqdisable = etherh_irq_disable,
};
static void
etherh_setif(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
unsigned long flags;
void __iomem *addr;
local_irq_save(flags);
/* set the interface type */
switch (etherh_priv(dev)->id) {
case PROD_I3_ETHERLAN600:
case PROD_I3_ETHERLAN600A:
addr = (void __iomem *)dev->base_addr + EN0_RCNTHI;
switch (dev->if_port) {
case IF_PORT_10BASE2:
writeb((readb(addr) & 0xf8) | 1, addr);
break;
case IF_PORT_10BASET:
writeb((readb(addr) & 0xf8), addr);
break;
}
break;
case PROD_I3_ETHERLAN500:
switch (dev->if_port) {
case IF_PORT_10BASE2:
etherh_clr_ctrl(etherh_priv(dev), ETHERH_CP_IF);
break;
case IF_PORT_10BASET:
etherh_set_ctrl(etherh_priv(dev), ETHERH_CP_IF);
break;
}
break;
default:
break;
}
local_irq_restore(flags);
}
static int
etherh_getifstat(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
void __iomem *addr;
int stat = 0;
switch (etherh_priv(dev)->id) {
case PROD_I3_ETHERLAN600:
case PROD_I3_ETHERLAN600A:
addr = (void __iomem *)dev->base_addr + EN0_RCNTHI;
switch (dev->if_port) {
case IF_PORT_10BASE2:
stat = 1;
break;
case IF_PORT_10BASET:
stat = readb(addr) & 4;
break;
}
break;
case PROD_I3_ETHERLAN500:
switch (dev->if_port) {
case IF_PORT_10BASE2:
stat = 1;
break;
case IF_PORT_10BASET:
stat = etherh_get_stat(etherh_priv(dev)) & ETHERH_CP_HEARTBEAT;
break;
}
break;
default:
stat = 0;
break;
}
return stat != 0;
}
/*
* Configure the interface. Note that we ignore the other
* parts of ifmap, since it's mostly meaningless for this driver.
*/
static int etherh_set_config(struct net_device *dev, struct ifmap *map)
{
switch (map->port) {
case IF_PORT_10BASE2:
case IF_PORT_10BASET:
/*
* If the user explicitly sets the interface
* media type, turn off automedia detection.
*/
dev->flags &= ~IFF_AUTOMEDIA;
dev->if_port = map->port;
break;
default:
return -EINVAL;
}
etherh_setif(dev);
return 0;
}
/*
* Reset the 8390 (hard reset). Note that we can't actually do this.
*/
static void
etherh_reset(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
void __iomem *addr = (void __iomem *)dev->base_addr;
writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr);
/*
* See if we need to change the interface type.
* Note that we use 'interface_num' as a flag
* to indicate that we need to change the media.
*/
if (dev->flags & IFF_AUTOMEDIA && ei_local->interface_num) {
ei_local->interface_num = 0;
if (dev->if_port == IF_PORT_10BASET)
dev->if_port = IF_PORT_10BASE2;
else
dev->if_port = IF_PORT_10BASET;
etherh_setif(dev);
}
}
/*
* Write a block of data out to the 8390
*/
static void
etherh_block_output (struct net_device *dev, int count, const unsigned char *buf, int start_page)
{
struct ei_device *ei_local = netdev_priv(dev);
unsigned long dma_start;
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
" DMAstat %d irqlock %d\n", dev->name,
ei_local->dmaing, ei_local->irqlock);
return;
}
/*
* Make sure we have a round number of bytes if we're in word mode.
*/
if (count & 1 && ei_local->word16)
count++;
ei_local->dmaing = 1;
addr = (void __iomem *)dev->base_addr;
dma_base = etherh_priv(dev)->dma_base;
count = (count + 1) & ~1;
writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
writeb (0x42, addr + EN0_RCNTLO);
writeb (0x00, addr + EN0_RCNTHI);
writeb (0x42, addr + EN0_RSARLO);
writeb (0x00, addr + EN0_RSARHI);
writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);
udelay (1);
writeb (ENISR_RDC, addr + EN0_ISR);
writeb (count, addr + EN0_RCNTLO);
writeb (count >> 8, addr + EN0_RCNTHI);
writeb (0, addr + EN0_RSARLO);
writeb (start_page, addr + EN0_RSARHI);
writeb (E8390_RWRITE | E8390_START, addr + E8390_CMD);
if (ei_local->word16)
writesw (dma_base, buf, count >> 1);
else
writesb (dma_base, buf, count);
dma_start = jiffies;
while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
printk(KERN_ERR "%s: timeout waiting for TX RDC\n",
dev->name);
etherh_reset (dev);
__NS8390_init (dev, 1);
break;
}
writeb (ENISR_RDC, addr + EN0_ISR);
ei_local->dmaing = 0;
}
/*
* Read a block of data from the 8390
*/
static void
etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
struct ei_device *ei_local = netdev_priv(dev);
unsigned char *buf;
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
" DMAstat %d irqlock %d\n", dev->name,
ei_local->dmaing, ei_local->irqlock);
return;
}
ei_local->dmaing = 1;
addr = (void __iomem *)dev->base_addr;
dma_base = etherh_priv(dev)->dma_base;
buf = skb->data;
writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
writeb (count, addr + EN0_RCNTLO);
writeb (count >> 8, addr + EN0_RCNTHI);
writeb (ring_offset, addr + EN0_RSARLO);
writeb (ring_offset >> 8, addr + EN0_RSARHI);
writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);
if (ei_local->word16) {
readsw (dma_base, buf, count >> 1);
if (count & 1)
buf[count - 1] = readb (dma_base);
} else
readsb (dma_base, buf, count);
writeb (ENISR_RDC, addr + EN0_ISR);
ei_local->dmaing = 0;
}
/*
* Read a header from the 8390
*/
static void
etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
struct ei_device *ei_local = netdev_priv(dev);
void __iomem *dma_base, *addr;
if (ei_local->dmaing) {
printk(KERN_ERR "%s: DMAing conflict in etherh_get_header: "
" DMAstat %d irqlock %d\n", dev->name,
ei_local->dmaing, ei_local->irqlock);
return;
}
ei_local->dmaing = 1;
addr = (void __iomem *)dev->base_addr;
dma_base = etherh_priv(dev)->dma_base;
writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
writeb (sizeof (*hdr), addr + EN0_RCNTLO);
writeb (0, addr + EN0_RCNTHI);
writeb (0, addr + EN0_RSARLO);
writeb (ring_page, addr + EN0_RSARHI);
writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);
if (ei_local->word16)
readsw (dma_base, hdr, sizeof (*hdr) >> 1);
else
readsb (dma_base, hdr, sizeof (*hdr));
writeb (ENISR_RDC, addr + EN0_ISR);
ei_local->dmaing = 0;
}
/*
* Open/initialize the board. This is called (in the current kernel)
* sometime after booting when the 'ifconfig' program is run.
*
* This routine should set everything up anew at each open, even
* registers that "should" only need to be set once at boot, so that
* there is a non-reboot way to recover if something goes wrong.
*/
static int
etherh_open(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
if (!is_valid_ether_addr(dev->dev_addr)) {
printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
dev->name);
return -EINVAL;
}
if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev))
return -EAGAIN;
/*
* Make sure that we aren't going to change the
* media type on the next reset - we are about to
* do automedia manually now.
*/
ei_local->interface_num = 0;
/*
* If we are doing automedia detection, do it now.
* This is more reliable than the 8390's detection.
*/
if (dev->flags & IFF_AUTOMEDIA) {
dev->if_port = IF_PORT_10BASET;
etherh_setif(dev);
mdelay(1);
if (!etherh_getifstat(dev)) {
dev->if_port = IF_PORT_10BASE2;
etherh_setif(dev);
}
} else
etherh_setif(dev);
etherh_reset(dev);
__ei_open(dev);
return 0;
}
/*
* The inverse routine to etherh_open().
*/
static int
etherh_close(struct net_device *dev)
{
__ei_close (dev);
free_irq (dev->irq, dev);
return 0;
}
/*
* Initialisation
*/
static void __init etherh_banner(void)
{
static int version_printed;
if (net_debug && version_printed++ == 0)
printk(KERN_INFO "%s", version);
}
/*
* Read the ethernet address string from the on board rom.
* This is an ascii string...
*/
static int __devinit etherh_addr(char *addr, struct expansion_card *ec)
{
struct in_chunk_dir cd;
char *s;
if (!ecard_readchunk(&cd, ec, 0xf5, 0)) {
printk(KERN_ERR "%s: unable to read podule description string\n",
dev_name(&ec->dev));
goto no_addr;
}
s = strchr(cd.d.string, '(');
if (s) {
int i;
for (i = 0; i < 6; i++) {
addr[i] = simple_strtoul(s + 1, &s, 0x10);
if (*s != (i == 5? ')' : ':'))
break;
}
if (i == 6)
return 0;
}
printk(KERN_ERR "%s: unable to parse MAC address: %s\n",
dev_name(&ec->dev), cd.d.string);
no_addr:
return -ENODEV;
}
/*
* Create an ethernet address from the system serial number.
*/
static int __init etherm_addr(char *addr)
{
unsigned int serial;
if (system_serial_low == 0 && system_serial_high == 0)
return -ENODEV;
serial = system_serial_low | system_serial_high;
addr[0] = 0;
addr[1] = 0;
addr[2] = 0xa4;
addr[3] = 0x10 + (serial >> 24);
addr[4] = serial >> 16;
addr[5] = serial >> 8;
return 0;
}
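/*
 * For illustration (hypothetical serial number): a combined serial of
 * 0x12345678 yields the MAC 00:00:a4:22:34:56 -- addr[3] = 0x10 + 0x12,
 * addr[4] = 0x34 (bits 23..16), addr[5] = 0x56 (bits 15..8).
 */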
static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
cmd->supported = etherh_priv(dev)->supported;
ethtool_cmd_speed_set(cmd, SPEED_10);
cmd->duplex = DUPLEX_HALF;
cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC;
cmd->autoneg = (dev->flags & IFF_AUTOMEDIA ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
return 0;
}
static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
switch (cmd->autoneg) {
case AUTONEG_ENABLE:
dev->flags |= IFF_AUTOMEDIA;
break;
case AUTONEG_DISABLE:
switch (cmd->port) {
case PORT_TP:
dev->if_port = IF_PORT_10BASET;
break;
case PORT_BNC:
dev->if_port = IF_PORT_10BASE2;
break;
default:
return -EINVAL;
}
dev->flags &= ~IFF_AUTOMEDIA;
break;
default:
return -EINVAL;
}
etherh_setif(dev);
return 0;
}
static const struct ethtool_ops etherh_ethtool_ops = {
.get_settings = etherh_get_settings,
.set_settings = etherh_set_settings,
.get_drvinfo = etherh_get_drvinfo,
};
static const struct net_device_ops etherh_netdev_ops = {
.ndo_open = etherh_open,
.ndo_stop = etherh_close,
.ndo_set_config = etherh_set_config,
.ndo_start_xmit = __ei_start_xmit,
.ndo_tx_timeout = __ei_tx_timeout,
.ndo_get_stats = __ei_get_stats,
.ndo_set_multicast_list = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = __ei_poll,
#endif
};
static u32 etherh_regoffsets[16];
static u32 etherm_regoffsets[16];
static int __devinit
etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
{
const struct etherh_data *data = id->data;
struct ei_device *ei_local;
struct net_device *dev;
struct etherh_priv *eh;
int ret;
etherh_banner();
ret = ecard_request_resources(ec);
if (ret)
goto out;
dev = ____alloc_ei_netdev(sizeof(struct etherh_priv));
if (!dev) {
ret = -ENOMEM;
goto release;
}
SET_NETDEV_DEV(dev, &ec->dev);
dev->netdev_ops = &etherh_netdev_ops;
dev->irq = ec->irq;
dev->ethtool_ops = &etherh_ethtool_ops;
if (data->supported & SUPPORTED_Autoneg)
dev->flags |= IFF_AUTOMEDIA;
if (data->supported & SUPPORTED_TP) {
dev->flags |= IFF_PORTSEL;
dev->if_port = IF_PORT_10BASET;
} else if (data->supported & SUPPORTED_BNC) {
dev->flags |= IFF_PORTSEL;
dev->if_port = IF_PORT_10BASE2;
} else
dev->if_port = IF_PORT_UNKNOWN;
eh = etherh_priv(dev);
eh->supported = data->supported;
eh->ctrl = 0;
eh->id = ec->cid.product;
eh->memc = ecardm_iomap(ec, ECARD_RES_MEMC, 0, PAGE_SIZE);
if (!eh->memc) {
ret = -ENOMEM;
goto free;
}
eh->ctrl_port = eh->memc;
if (data->ctrl_ioc) {
eh->ioc_fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, PAGE_SIZE);
if (!eh->ioc_fast) {
ret = -ENOMEM;
goto free;
}
eh->ctrl_port = eh->ioc_fast;
}
dev->base_addr = (unsigned long)eh->memc + data->ns8390_offset;
eh->dma_base = eh->memc + data->dataport_offset;
eh->ctrl_port += data->ctrlport_offset;
/*
* IRQ and control port handling - only for non-NIC slot cards.
*/
if (ec->slot_no != 8) {
ecard_setirq(ec, &etherh_ops, eh);
} else {
/*
* If we're in the NIC slot, make sure the IRQ is enabled
*/
etherh_set_ctrl(eh, ETHERH_CP_IE);
}
ei_local = netdev_priv(dev);
spin_lock_init(&ei_local->page_lock);
if (ec->cid.product == PROD_ANT_ETHERM) {
etherm_addr(dev->dev_addr);
ei_local->reg_offset = etherm_regoffsets;
} else {
etherh_addr(dev->dev_addr, ec);
ei_local->reg_offset = etherh_regoffsets;
}
ei_local->name = dev->name;
ei_local->word16 = 1;
ei_local->tx_start_page = data->tx_start_page;
ei_local->rx_start_page = ei_local->tx_start_page + TX_PAGES;
ei_local->stop_page = data->stop_page;
ei_local->reset_8390 = etherh_reset;
ei_local->block_input = etherh_block_input;
ei_local->block_output = etherh_block_output;
ei_local->get_8390_hdr = etherh_get_header;
ei_local->interface_num = 0;
etherh_reset(dev);
__NS8390_init(dev, 0);
ret = register_netdev(dev);
if (ret)
goto free;
printk(KERN_INFO "%s: %s in slot %d, %pM\n",
dev->name, data->name, ec->slot_no, dev->dev_addr);
ecard_set_drvdata(ec, dev);
return 0;
free:
free_netdev(dev);
release:
ecard_release_resources(ec);
out:
return ret;
}
static void __devexit etherh_remove(struct expansion_card *ec)
{
struct net_device *dev = ecard_get_drvdata(ec);
ecard_set_drvdata(ec, NULL);
unregister_netdev(dev);
free_netdev(dev);
ecard_release_resources(ec);
}
static struct etherh_data etherm_data = {
.ns8390_offset = ETHERM_NS8390,
.dataport_offset = ETHERM_NS8390 + ETHERM_DATAPORT,
.ctrlport_offset = ETHERM_NS8390 + ETHERM_CTRLPORT,
.name = "ANT EtherM",
.supported = SUPPORTED_10baseT_Half,
.tx_start_page = ETHERM_TX_START_PAGE,
.stop_page = ETHERM_STOP_PAGE,
};
static struct etherh_data etherlan500_data = {
.ns8390_offset = ETHERH500_NS8390,
.dataport_offset = ETHERH500_NS8390 + ETHERH500_DATAPORT,
.ctrlport_offset = ETHERH500_CTRLPORT,
.ctrl_ioc = 1,
.name = "i3 EtherH 500",
.supported = SUPPORTED_10baseT_Half,
.tx_start_page = ETHERH_TX_START_PAGE,
.stop_page = ETHERH_STOP_PAGE,
};
static struct etherh_data etherlan600_data = {
.ns8390_offset = ETHERH600_NS8390,
.dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT,
.ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT,
.name = "i3 EtherH 600",
.supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg,
.tx_start_page = ETHERH_TX_START_PAGE,
.stop_page = ETHERH_STOP_PAGE,
};
static struct etherh_data etherlan600a_data = {
.ns8390_offset = ETHERH600_NS8390,
.dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT,
.ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT,
.name = "i3 EtherH 600A",
.supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg,
.tx_start_page = ETHERH_TX_START_PAGE,
.stop_page = ETHERH_STOP_PAGE,
};
static const struct ecard_id etherh_ids[] = {
{ MANU_ANT, PROD_ANT_ETHERM, &etherm_data },
{ MANU_I3, PROD_I3_ETHERLAN500, &etherlan500_data },
{ MANU_I3, PROD_I3_ETHERLAN600, &etherlan600_data },
{ MANU_I3, PROD_I3_ETHERLAN600A, &etherlan600a_data },
{ 0xffff, 0xffff }
};
static struct ecard_driver etherh_driver = {
.probe = etherh_probe,
.remove = __devexit_p(etherh_remove),
.id_table = etherh_ids,
.drv = {
.name = DRV_NAME,
},
};
static int __init etherh_init(void)
{
int i;
for (i = 0; i < 16; i++) {
etherh_regoffsets[i] = i << 2;
etherm_regoffsets[i] = i << 5;
}
return ecard_register_driver(&etherh_driver);
}
static void __exit etherh_exit(void)
{
ecard_remove_driver(&etherh_driver);
}
module_init(etherh_init);
module_exit(etherh_exit);
| gpl-2.0 |
namagi/android_kernel_motorola_msm8960-common | sound/soc/pxa/magician.c | 3026 | 14329 | /*
* SoC audio for HTC Magician
*
* Copyright (c) 2006 Philipp Zabel <philipp.zabel@gmail.com>
*
* based on spitz.c,
* Authors: Liam Girdwood <lrg@slimlogic.co.uk>
* Richard Purdie <richard@openedhand.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/uda1380.h>
#include <mach/magician.h>
#include <asm/mach-types.h>
#include "../codecs/uda1380.h"
#include "pxa2xx-i2s.h"
#include "pxa-ssp.h"
#define MAGICIAN_MIC 0
#define MAGICIAN_MIC_EXT 1
static int magician_hp_switch;
static int magician_spk_switch = 1;
static int magician_in_sel = MAGICIAN_MIC;
static void magician_ext_control(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = &codec->dapm;
if (magician_spk_switch)
snd_soc_dapm_enable_pin(dapm, "Speaker");
else
snd_soc_dapm_disable_pin(dapm, "Speaker");
if (magician_hp_switch)
snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
else
snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
switch (magician_in_sel) {
case MAGICIAN_MIC:
snd_soc_dapm_disable_pin(dapm, "Headset Mic");
snd_soc_dapm_enable_pin(dapm, "Call Mic");
break;
case MAGICIAN_MIC_EXT:
snd_soc_dapm_disable_pin(dapm, "Call Mic");
snd_soc_dapm_enable_pin(dapm, "Headset Mic");
break;
}
snd_soc_dapm_sync(dapm);
}
static int magician_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_codec *codec = rtd->codec;
mutex_lock(&codec->mutex);
/* check the jack status at stream startup */
magician_ext_control(codec);
mutex_unlock(&codec->mutex);
return 0;
}
/*
* Magician uses SSP port for playback.
*/
static int magician_playback_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
unsigned int acps, acds, width, rate;
unsigned int div4 = PXA_SSP_CLK_SCDB_4;
int ret = 0;
rate = params_rate(params);
width = snd_pcm_format_physical_width(params_format(params));
/*
* rate = SSPSCLK / (2 * width(16 or 32))
* SSPSCLK = (ACPS / ACDS) / SSPSCLKDIV(div4 or div1)
*/
switch (params_rate(params)) {
case 8000:
/* off by a factor of 2: bug in the PXA27x audio clock? */
acps = 32842000;
switch (width) {
case 16:
/* 513156 Hz ~= _2_ * 8000 Hz * 32 (+0.23%) */
acds = PXA_SSP_CLK_AUDIO_DIV_16;
break;
default: /* 32 */
/* 1026312 Hz ~= _2_ * 8000 Hz * 64 (+0.23%) */
acds = PXA_SSP_CLK_AUDIO_DIV_8;
}
break;
case 11025:
acps = 5622000;
switch (width) {
case 16:
/* 351375 Hz ~= 11025 Hz * 32 (-0.41%) */
acds = PXA_SSP_CLK_AUDIO_DIV_4;
break;
default: /* 32 */
/* 702750 Hz ~= 11025 Hz * 64 (-0.41%) */
acds = PXA_SSP_CLK_AUDIO_DIV_2;
}
break;
case 22050:
acps = 5622000;
switch (width) {
case 16:
/* 702750 Hz ~= 22050 Hz * 32 (-0.41%) */
acds = PXA_SSP_CLK_AUDIO_DIV_2;
break;
default: /* 32 */
/* 1405500 Hz ~= 22050 Hz * 64 (-0.41%) */
acds = PXA_SSP_CLK_AUDIO_DIV_1;
}
break;
case 44100:
acps = 5622000;
switch (width) {
case 16:
/* 1405500 Hz ~= 44100 Hz * 32 (-0.41%) */
acds = PXA_SSP_CLK_AUDIO_DIV_2;
break;
default: /* 32 */
/* 2811000 Hz ~= 44100 Hz * 64 (-0.41%) */
acds = PXA_SSP_CLK_AUDIO_DIV_1;
}
break;
case 48000:
acps = 12235000;
switch (width) {
case 16:
/* 1529375 Hz ~= 48000 Hz * 32 (-0.44%) */
acds = PXA_SSP_CLK_AUDIO_DIV_2;
break;
default: /* 32 */
/* 3058750 Hz ~= 48000 Hz * 64 (-0.44%) */
acds = PXA_SSP_CLK_AUDIO_DIV_1;
}
break;
case 96000:
default:
acps = 12235000;
switch (width) {
case 16:
/* 3058750 Hz ~= 96000 Hz * 32 (-0.44%) */
acds = PXA_SSP_CLK_AUDIO_DIV_1;
break;
default: /* 32 */
/* 6117500 Hz ~= 96000 Hz * 64 (-0.44%) */
acds = PXA_SSP_CLK_AUDIO_DIV_2;
div4 = PXA_SSP_CLK_SCDB_1;
break;
}
break;
}
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_MSB |
SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
/* set cpu DAI configuration */
ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_A |
SND_SOC_DAIFMT_NB_IF | SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_tdm_slot(cpu_dai, 1, 0, 1, width);
if (ret < 0)
return ret;
/* set audio clock as clock source */
ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_AUDIO, 0,
SND_SOC_CLOCK_OUT);
if (ret < 0)
return ret;
/* set the SSP audio system clock ACDS divider */
ret = snd_soc_dai_set_clkdiv(cpu_dai,
PXA_SSP_AUDIO_DIV_ACDS, acds);
if (ret < 0)
return ret;
/* set the SSP audio system clock SCDB divider4 */
ret = snd_soc_dai_set_clkdiv(cpu_dai,
PXA_SSP_AUDIO_DIV_SCDB, div4);
if (ret < 0)
return ret;
/* set SSP audio pll clock */
ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, acps);
if (ret < 0)
return ret;
return 0;
}
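/*
 * Worked example of the clock arithmetic above, using the 8000 Hz / 16-bit
 * entry from the table: with acps = 32842000, ACDS = /16 and SSPSCLKDIV = /4,
 *
 *   SSPSCLK = (32842000 / 16) / 4 ~= 513156 Hz ~= 2 * 8000 Hz * 32 (+0.23%)
 *
 * and rate = SSPSCLK / (2 * 16) ~= 16036 Hz, i.e. twice the nominal 8000 Hz,
 * which is the "off by a factor of 2" PXA27x quirk noted in the table.
 */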
/*
* Magician uses I2S for capture.
*/
static int magician_capture_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret = 0;
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai,
SND_SOC_DAIFMT_MSB | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
/* set cpu DAI configuration */
ret = snd_soc_dai_set_fmt(cpu_dai,
SND_SOC_DAIFMT_MSB | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
/* set the I2S system clock as output */
ret = snd_soc_dai_set_sysclk(cpu_dai, PXA2XX_I2S_SYSCLK, 0,
SND_SOC_CLOCK_OUT);
if (ret < 0)
return ret;
return 0;
}
static struct snd_soc_ops magician_capture_ops = {
.startup = magician_startup,
.hw_params = magician_capture_hw_params,
};
static struct snd_soc_ops magician_playback_ops = {
.startup = magician_startup,
.hw_params = magician_playback_hw_params,
};
static int magician_get_hp(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.integer.value[0] = magician_hp_switch;
return 0;
}
static int magician_set_hp(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
if (magician_hp_switch == ucontrol->value.integer.value[0])
return 0;
magician_hp_switch = ucontrol->value.integer.value[0];
magician_ext_control(codec);
return 1;
}
static int magician_get_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.integer.value[0] = magician_spk_switch;
return 0;
}
static int magician_set_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
if (magician_spk_switch == ucontrol->value.integer.value[0])
return 0;
magician_spk_switch = ucontrol->value.integer.value[0];
magician_ext_control(codec);
return 1;
}
static int magician_get_input(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.integer.value[0] = magician_in_sel;
return 0;
}
static int magician_set_input(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
if (magician_in_sel == ucontrol->value.integer.value[0])
return 0;
magician_in_sel = ucontrol->value.integer.value[0];
switch (magician_in_sel) {
case MAGICIAN_MIC:
gpio_set_value(EGPIO_MAGICIAN_IN_SEL1, 1);
break;
case MAGICIAN_MIC_EXT:
gpio_set_value(EGPIO_MAGICIAN_IN_SEL1, 0);
}
return 1;
}
static int magician_spk_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
static int magician_hp_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
gpio_set_value(EGPIO_MAGICIAN_EP_POWER, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
static int magician_mic_bias(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
/* magician machine dapm widgets */
static const struct snd_soc_dapm_widget uda1380_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", magician_hp_power),
SND_SOC_DAPM_SPK("Speaker", magician_spk_power),
SND_SOC_DAPM_MIC("Call Mic", magician_mic_bias),
SND_SOC_DAPM_MIC("Headset Mic", magician_mic_bias),
};
/* magician machine audio_map */
static const struct snd_soc_dapm_route audio_map[] = {
/* Headphone connected to VOUTL, VOUTR */
{"Headphone Jack", NULL, "VOUTL"},
{"Headphone Jack", NULL, "VOUTR"},
/* Speaker connected to VOUTL, VOUTR */
{"Speaker", NULL, "VOUTL"},
{"Speaker", NULL, "VOUTR"},
/* Mics are connected to VINM */
{"VINM", NULL, "Headset Mic"},
{"VINM", NULL, "Call Mic"},
};
static const char *input_select[] = {"Call Mic", "Headset Mic"};
static const struct soc_enum magician_in_sel_enum =
SOC_ENUM_SINGLE_EXT(2, input_select);
static const struct snd_kcontrol_new uda1380_magician_controls[] = {
SOC_SINGLE_BOOL_EXT("Headphone Switch",
(unsigned long)&magician_hp_switch,
magician_get_hp, magician_set_hp),
SOC_SINGLE_BOOL_EXT("Speaker Switch",
(unsigned long)&magician_spk_switch,
magician_get_spk, magician_set_spk),
SOC_ENUM_EXT("Input Select", magician_in_sel_enum,
magician_get_input, magician_set_input),
};
/*
* Logic for a uda1380 as connected on an HTC Magician
*/
static int magician_uda1380_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
int err;
/* NC codec pins */
snd_soc_dapm_nc_pin(dapm, "VOUTLHP");
snd_soc_dapm_nc_pin(dapm, "VOUTRHP");
/* FIXME: is anything connected here? */
snd_soc_dapm_nc_pin(dapm, "VINL");
snd_soc_dapm_nc_pin(dapm, "VINR");
/* Add magician specific controls */
err = snd_soc_add_controls(codec, uda1380_magician_controls,
ARRAY_SIZE(uda1380_magician_controls));
if (err < 0)
return err;
/* Add magician specific widgets */
snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
ARRAY_SIZE(uda1380_dapm_widgets));
/* Set up magician specific audio path interconnects */
snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
snd_soc_dapm_sync(dapm);
return 0;
}
/* magician digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link magician_dai[] = {
{
.name = "uda1380",
.stream_name = "UDA1380 Playback",
.cpu_dai_name = "pxa-ssp-dai.0",
.codec_dai_name = "uda1380-hifi-playback",
.platform_name = "pxa-pcm-audio",
.codec_name = "uda1380-codec.0-0018",
.init = magician_uda1380_init,
.ops = &magician_playback_ops,
},
{
.name = "uda1380",
.stream_name = "UDA1380 Capture",
.cpu_dai_name = "pxa2xx-i2s",
.codec_dai_name = "uda1380-hifi-capture",
.platform_name = "pxa-pcm-audio",
.codec_name = "uda1380-codec.0-0018",
.ops = &magician_capture_ops,
}
};
/* magician audio machine driver */
static struct snd_soc_card snd_soc_card_magician = {
.name = "Magician",
.dai_link = magician_dai,
.num_links = ARRAY_SIZE(magician_dai),
};
static struct platform_device *magician_snd_device;
/*
* FIXME: move into magician board file once merged into the pxa tree
*/
static struct uda1380_platform_data uda1380_info = {
.gpio_power = EGPIO_MAGICIAN_CODEC_POWER,
.gpio_reset = EGPIO_MAGICIAN_CODEC_RESET,
.dac_clk = UDA1380_DAC_CLK_WSPLL,
};
static struct i2c_board_info i2c_board_info[] = {
{
I2C_BOARD_INFO("uda1380", 0x18),
.platform_data = &uda1380_info,
},
};
static int __init magician_init(void)
{
int ret;
struct i2c_adapter *adapter;
struct i2c_client *client;
if (!machine_is_magician())
return -ENODEV;
adapter = i2c_get_adapter(0);
if (!adapter)
return -ENODEV;
client = i2c_new_device(adapter, i2c_board_info);
i2c_put_adapter(adapter);
if (!client)
return -ENODEV;
ret = gpio_request(EGPIO_MAGICIAN_SPK_POWER, "SPK_POWER");
if (ret)
goto err_request_spk;
ret = gpio_request(EGPIO_MAGICIAN_EP_POWER, "EP_POWER");
if (ret)
goto err_request_ep;
ret = gpio_request(EGPIO_MAGICIAN_MIC_POWER, "MIC_POWER");
if (ret)
goto err_request_mic;
ret = gpio_request(EGPIO_MAGICIAN_IN_SEL0, "IN_SEL0");
if (ret)
goto err_request_in_sel0;
ret = gpio_request(EGPIO_MAGICIAN_IN_SEL1, "IN_SEL1");
if (ret)
goto err_request_in_sel1;
gpio_set_value(EGPIO_MAGICIAN_IN_SEL0, 0);
magician_snd_device = platform_device_alloc("soc-audio", -1);
if (!magician_snd_device) {
ret = -ENOMEM;
goto err_pdev;
}
platform_set_drvdata(magician_snd_device, &snd_soc_card_magician);
ret = platform_device_add(magician_snd_device);
if (ret) {
platform_device_put(magician_snd_device);
goto err_pdev;
}
return 0;
err_pdev:
gpio_free(EGPIO_MAGICIAN_IN_SEL1);
err_request_in_sel1:
gpio_free(EGPIO_MAGICIAN_IN_SEL0);
err_request_in_sel0:
gpio_free(EGPIO_MAGICIAN_MIC_POWER);
err_request_mic:
gpio_free(EGPIO_MAGICIAN_EP_POWER);
err_request_ep:
gpio_free(EGPIO_MAGICIAN_SPK_POWER);
err_request_spk:
return ret;
}
static void __exit magician_exit(void)
{
platform_device_unregister(magician_snd_device);
gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, 0);
gpio_set_value(EGPIO_MAGICIAN_EP_POWER, 0);
gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, 0);
gpio_free(EGPIO_MAGICIAN_IN_SEL1);
gpio_free(EGPIO_MAGICIAN_IN_SEL0);
gpio_free(EGPIO_MAGICIAN_MIC_POWER);
gpio_free(EGPIO_MAGICIAN_EP_POWER);
gpio_free(EGPIO_MAGICIAN_SPK_POWER);
}
module_init(magician_init);
module_exit(magician_exit);
MODULE_AUTHOR("Philipp Zabel");
MODULE_DESCRIPTION("ALSA SoC Magician");
MODULE_LICENSE("GPL");
| gpl-2.0 |
DutchDanny/SensationXL-ICS | drivers/platform/x86/hdaps.c | 3026 | 17097 | /*
* hdaps.c - driver for IBM's Hard Drive Active Protection System
*
* Copyright (C) 2005 Robert Love <rml@novell.com>
* Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
*
* The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
* starting with the R40, T41, and X40. It provides a basic two-axis
* accelerometer and other data, such as the device's temperature.
*
* This driver is based on the document by Mark A. Smith available at
* http://www.almaden.ibm.com/cs/people/marksmith/tpaps.html and a lot of trial
* and error.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License v2 as published by the
* Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/dmi.h>
#include <linux/jiffies.h>
#include <linux/io.h>
#define HDAPS_LOW_PORT 0x1600 /* first port used by hdaps */
#define HDAPS_NR_PORTS 0x30 /* number of ports: 0x1600 - 0x162f */
#define HDAPS_PORT_STATE 0x1611 /* device state */
#define HDAPS_PORT_YPOS 0x1612 /* y-axis position */
#define HDAPS_PORT_XPOS 0x1614 /* x-axis position */
#define HDAPS_PORT_TEMP1 0x1616 /* device temperature, in Celsius */
#define HDAPS_PORT_YVAR 0x1617 /* y-axis variance (what is this?) */
#define HDAPS_PORT_XVAR 0x1619 /* x-axis variance (what is this?) */
#define HDAPS_PORT_TEMP2 0x161b /* device temperature (again?) */
#define HDAPS_PORT_UNKNOWN 0x161c /* what is this? */
#define HDAPS_PORT_KMACT 0x161d /* keyboard or mouse activity */
#define STATE_FRESH 0x50 /* accelerometer data is fresh */
#define KEYBD_MASK 0x20 /* set if keyboard activity */
#define MOUSE_MASK 0x40 /* set if mouse activity */
#define KEYBD_ISSET(n) (!! (n & KEYBD_MASK)) /* keyboard used? */
#define MOUSE_ISSET(n) (!! (n & MOUSE_MASK)) /* mouse used? */
#define INIT_TIMEOUT_MSECS 4000 /* wait up to 4s for device init ... */
#define INIT_WAIT_MSECS 200 /* ... in 200ms increments */
#define HDAPS_POLL_INTERVAL 50 /* poll for input every 1/20s (50 ms)*/
#define HDAPS_INPUT_FUZZ 4 /* input event threshold */
#define HDAPS_INPUT_FLAT 4
#define HDAPS_X_AXIS (1 << 0)
#define HDAPS_Y_AXIS (1 << 1)
#define HDAPS_BOTH_AXES (HDAPS_X_AXIS | HDAPS_Y_AXIS)
static struct platform_device *pdev;
static struct input_polled_dev *hdaps_idev;
static unsigned int hdaps_invert;
static u8 km_activity;
static int rest_x;
static int rest_y;
static DEFINE_MUTEX(hdaps_mtx);
/*
* __get_latch - Get the value from a given port. Callers must hold hdaps_mtx.
*/
static inline u8 __get_latch(u16 port)
{
return inb(port) & 0xff;
}
/*
* __check_latch - Check a port latch for a given value. Returns zero if the
* port contains the given value. Callers must hold hdaps_mtx.
*/
static inline int __check_latch(u16 port, u8 val)
{
if (__get_latch(port) == val)
return 0;
return -EINVAL;
}
/*
* __wait_latch - Wait up to 100us for a port latch to get a certain value,
* returning zero if the value is obtained. Callers must hold hdaps_mtx.
*/
static int __wait_latch(u16 port, u8 val)
{
unsigned int i;
for (i = 0; i < 20; i++) {
if (!__check_latch(port, val))
return 0;
udelay(5);
}
return -EIO;
}
/*
* __device_refresh - request a refresh from the accelerometer. Does not wait
* for refresh to complete. Callers must hold hdaps_mtx.
*/
static void __device_refresh(void)
{
udelay(200);
if (inb(0x1604) != STATE_FRESH) {
outb(0x11, 0x1610);
outb(0x01, 0x161f);
}
}
/*
* __device_refresh_sync - request a synchronous refresh from the
* accelerometer. We wait for the refresh to complete. Returns zero if
* successful and nonzero on error. Callers must hold hdaps_mtx.
*/
static int __device_refresh_sync(void)
{
__device_refresh();
return __wait_latch(0x1604, STATE_FRESH);
}
/*
* __device_complete - indicate to the accelerometer that we are done reading
* data, and then initiate an async refresh. Callers must hold hdaps_mtx.
*/
static inline void __device_complete(void)
{
inb(0x161f);
inb(0x1604);
__device_refresh();
}
/*
* hdaps_readb_one - reads a byte from a single I/O port, placing the value in
* the given pointer. Returns zero on success or a negative error on failure.
* Can sleep.
*/
static int hdaps_readb_one(unsigned int port, u8 *val)
{
int ret;
mutex_lock(&hdaps_mtx);
/* do a sync refresh -- we need to be sure that we read fresh data */
ret = __device_refresh_sync();
if (ret)
goto out;
*val = inb(port);
__device_complete();
out:
mutex_unlock(&hdaps_mtx);
return ret;
}
/* __hdaps_read_pair - internal lockless helper for hdaps_read_pair(). */
static int __hdaps_read_pair(unsigned int port1, unsigned int port2,
int *x, int *y)
{
/* do a sync refresh -- we need to be sure that we read fresh data */
if (__device_refresh_sync())
return -EIO;
*y = inw(port2);
*x = inw(port1);
km_activity = inb(HDAPS_PORT_KMACT);
__device_complete();
/* hdaps_invert is a bitvector to negate the axes */
if (hdaps_invert & HDAPS_X_AXIS)
*x = -*x;
if (hdaps_invert & HDAPS_Y_AXIS)
*y = -*y;
return 0;
}
/*
* hdaps_read_pair - reads the values from a pair of ports, placing the values
* in the given pointers. Returns zero on success. Can sleep.
*/
static int hdaps_read_pair(unsigned int port1, unsigned int port2,
int *val1, int *val2)
{
int ret;
mutex_lock(&hdaps_mtx);
ret = __hdaps_read_pair(port1, port2, val1, val2);
mutex_unlock(&hdaps_mtx);
return ret;
}
/*
* hdaps_device_init - initialize the accelerometer. Returns zero on success
* and negative error code on failure. Can sleep.
*/
static int hdaps_device_init(void)
{
int total, ret = -ENXIO;
mutex_lock(&hdaps_mtx);
outb(0x13, 0x1610);
outb(0x01, 0x161f);
if (__wait_latch(0x161f, 0x00))
goto out;
/*
* Most ThinkPads return 0x01.
*
* Others--namely the R50p, T41p, and T42p--return 0x03. These laptops
* have "inverted" axises.
*
* The 0x02 value occurs when the chip has been previously initialized.
*/
if (__check_latch(0x1611, 0x03) &&
__check_latch(0x1611, 0x02) &&
__check_latch(0x1611, 0x01))
goto out;
printk(KERN_DEBUG "hdaps: initial latch check good (0x%02x)\n",
__get_latch(0x1611));
outb(0x17, 0x1610);
outb(0x81, 0x1611);
outb(0x01, 0x161f);
if (__wait_latch(0x161f, 0x00))
goto out;
if (__wait_latch(0x1611, 0x00))
goto out;
if (__wait_latch(0x1612, 0x60))
goto out;
if (__wait_latch(0x1613, 0x00))
goto out;
outb(0x14, 0x1610);
outb(0x01, 0x1611);
outb(0x01, 0x161f);
if (__wait_latch(0x161f, 0x00))
goto out;
outb(0x10, 0x1610);
outb(0xc8, 0x1611);
outb(0x00, 0x1612);
outb(0x02, 0x1613);
outb(0x01, 0x161f);
if (__wait_latch(0x161f, 0x00))
goto out;
if (__device_refresh_sync())
goto out;
if (__wait_latch(0x1611, 0x00))
goto out;
/* we have done our dance, now let's wait for the applause */
for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) {
int x, y;
/* a read of the device helps push it into action */
__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y);
if (!__wait_latch(0x1611, 0x02)) {
ret = 0;
break;
}
msleep(INIT_WAIT_MSECS);
}
out:
mutex_unlock(&hdaps_mtx);
return ret;
}
/* Device model stuff */
static int hdaps_probe(struct platform_device *dev)
{
int ret;
ret = hdaps_device_init();
if (ret)
return ret;
pr_info("device successfully initialized\n");
return 0;
}
static int hdaps_resume(struct platform_device *dev)
{
return hdaps_device_init();
}
static struct platform_driver hdaps_driver = {
.probe = hdaps_probe,
.resume = hdaps_resume,
.driver = {
.name = "hdaps",
.owner = THIS_MODULE,
},
};
/*
* hdaps_calibrate - Set our "resting" values. Callers must hold hdaps_mtx.
*/
static void hdaps_calibrate(void)
{
__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &rest_x, &rest_y);
}
static void hdaps_mousedev_poll(struct input_polled_dev *dev)
{
struct input_dev *input_dev = dev->input;
int x, y;
mutex_lock(&hdaps_mtx);
if (__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y))
goto out;
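/* report motion relative to the calibrated rest position */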
input_report_abs(input_dev, ABS_X, x - rest_x);
input_report_abs(input_dev, ABS_Y, y - rest_y);
input_sync(input_dev);
out:
mutex_unlock(&hdaps_mtx);
}
/* Sysfs Files */
static ssize_t hdaps_position_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret, x, y;
ret = hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y);
if (ret)
return ret;
return sprintf(buf, "(%d,%d)\n", x, y);
}
static ssize_t hdaps_variance_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret, x, y;
ret = hdaps_read_pair(HDAPS_PORT_XVAR, HDAPS_PORT_YVAR, &x, &y);
if (ret)
return ret;
return sprintf(buf, "(%d,%d)\n", x, y);
}
static ssize_t hdaps_temp1_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 temp;
int ret;
ret = hdaps_readb_one(HDAPS_PORT_TEMP1, &temp);
if (ret < 0)
return ret;
return sprintf(buf, "%u\n", temp);
}
static ssize_t hdaps_temp2_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 temp;
int ret;
ret = hdaps_readb_one(HDAPS_PORT_TEMP2, &temp);
if (ret < 0)
return ret;
return sprintf(buf, "%u\n", temp);
}
static ssize_t hdaps_keyboard_activity_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", KEYBD_ISSET(km_activity));
}
static ssize_t hdaps_mouse_activity_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", MOUSE_ISSET(km_activity));
}
static ssize_t hdaps_calibrate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "(%d,%d)\n", rest_x, rest_y);
}
static ssize_t hdaps_calibrate_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
mutex_lock(&hdaps_mtx);
hdaps_calibrate();
mutex_unlock(&hdaps_mtx);
return count;
}
static ssize_t hdaps_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", hdaps_invert);
}
static ssize_t hdaps_invert_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int invert;
if (sscanf(buf, "%d", &invert) != 1 ||
invert < 0 || invert > HDAPS_BOTH_AXES)
return -EINVAL;
hdaps_invert = invert;
/* hdaps_calibrate() requires hdaps_mtx, so take it for the re-calibration */
mutex_lock(&hdaps_mtx);
hdaps_calibrate();
mutex_unlock(&hdaps_mtx);
return count;
}
static DEVICE_ATTR(position, 0444, hdaps_position_show, NULL);
static DEVICE_ATTR(variance, 0444, hdaps_variance_show, NULL);
static DEVICE_ATTR(temp1, 0444, hdaps_temp1_show, NULL);
static DEVICE_ATTR(temp2, 0444, hdaps_temp2_show, NULL);
static DEVICE_ATTR(keyboard_activity, 0444, hdaps_keyboard_activity_show, NULL);
static DEVICE_ATTR(mouse_activity, 0444, hdaps_mouse_activity_show, NULL);
static DEVICE_ATTR(calibrate, 0644, hdaps_calibrate_show,hdaps_calibrate_store);
static DEVICE_ATTR(invert, 0644, hdaps_invert_show, hdaps_invert_store);
static struct attribute *hdaps_attributes[] = {
&dev_attr_position.attr,
&dev_attr_variance.attr,
&dev_attr_temp1.attr,
&dev_attr_temp2.attr,
&dev_attr_keyboard_activity.attr,
&dev_attr_mouse_activity.attr,
&dev_attr_calibrate.attr,
&dev_attr_invert.attr,
NULL,
};
static struct attribute_group hdaps_attribute_group = {
.attrs = hdaps_attributes,
};
/* Module stuff */
/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */
static int __init hdaps_dmi_match(const struct dmi_system_id *id)
{
pr_info("%s detected\n", id->ident);
return 1;
}
/* hdaps_dmi_match_invert - found an inverted match. */
static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
{
hdaps_invert = (unsigned long)id->driver_data;
pr_info("inverting axis (%u) readings\n", hdaps_invert);
return hdaps_dmi_match(id);
}
#define HDAPS_DMI_MATCH_INVERT(vendor, model, axes) { \
.ident = vendor " " model, \
.callback = hdaps_dmi_match_invert, \
.driver_data = (void *)axes, \
.matches = { \
DMI_MATCH(DMI_BOARD_VENDOR, vendor), \
DMI_MATCH(DMI_PRODUCT_VERSION, model) \
} \
}
#define HDAPS_DMI_MATCH_NORMAL(vendor, model) \
HDAPS_DMI_MATCH_INVERT(vendor, model, 0)
/* Note that HDAPS_DMI_MATCH_NORMAL("ThinkPad T42") would match
"ThinkPad T42p", so the order of the entries matters.
If your ThinkPad is not recognized, please update to the latest
BIOS. This is especially the case for some R52 ThinkPads. */
static struct dmi_system_id __initdata hdaps_whitelist[] = {
HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R52"),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad R61i", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad R61", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T41p", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T41"),
HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad T42p", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T42"),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad T43"),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T400", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T60", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T61p", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad T61", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad X40"),
HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad X41", HDAPS_Y_AXIS),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad X60", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad X61s", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad X61", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad Z60m"),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad Z61m", HDAPS_BOTH_AXES),
HDAPS_DMI_MATCH_INVERT("LENOVO", "ThinkPad Z61p", HDAPS_BOTH_AXES),
{ .ident = NULL }
};
static int __init hdaps_init(void)
{
struct input_dev *idev;
int ret;
if (!dmi_check_system(hdaps_whitelist)) {
pr_warn("supported laptop not found!\n");
ret = -ENODEV;
goto out;
}
if (!request_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS, "hdaps")) {
ret = -ENXIO;
goto out;
}
ret = platform_driver_register(&hdaps_driver);
if (ret)
goto out_region;
pdev = platform_device_register_simple("hdaps", -1, NULL, 0);
if (IS_ERR(pdev)) {
ret = PTR_ERR(pdev);
goto out_driver;
}
ret = sysfs_create_group(&pdev->dev.kobj, &hdaps_attribute_group);
if (ret)
goto out_device;
hdaps_idev = input_allocate_polled_device();
if (!hdaps_idev) {
ret = -ENOMEM;
goto out_group;
}
hdaps_idev->poll = hdaps_mousedev_poll;
hdaps_idev->poll_interval = HDAPS_POLL_INTERVAL;
/* initial calibrate for the input device */
hdaps_calibrate();
/* initialize the input class */
idev = hdaps_idev->input;
idev->name = "hdaps";
idev->phys = "isa1600/input0";
idev->id.bustype = BUS_ISA;
idev->dev.parent = &pdev->dev;
idev->evbit[0] = BIT_MASK(EV_ABS);
input_set_abs_params(idev, ABS_X,
-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
input_set_abs_params(idev, ABS_Y,
-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
ret = input_register_polled_device(hdaps_idev);
if (ret)
goto out_idev;
pr_info("driver successfully loaded\n");
return 0;
out_idev:
input_free_polled_device(hdaps_idev);
out_group:
sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
out_device:
platform_device_unregister(pdev);
out_driver:
platform_driver_unregister(&hdaps_driver);
out_region:
release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
out:
pr_warn("driver init failed (ret=%d)!\n", ret);
return ret;
}
static void __exit hdaps_exit(void)
{
input_unregister_polled_device(hdaps_idev);
input_free_polled_device(hdaps_idev);
sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
platform_device_unregister(pdev);
platform_driver_unregister(&hdaps_driver);
release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
pr_info("driver unloaded\n");
}
module_init(hdaps_init);
module_exit(hdaps_exit);
module_param_named(invert, hdaps_invert, int, 0);
MODULE_PARM_DESC(invert, "invert data along each axis. 1 invert x-axis, "
"2 invert y-axis, 3 invert both axes.");
MODULE_AUTHOR("Robert Love");
MODULE_DESCRIPTION("IBM Hard Drive Active Protection System (HDAPS) driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
sjkoon/original-sgs3-soonjung | drivers/video/hgafb.c | 3026 | 17060 | /*
* linux/drivers/video/hgafb.c -- Hercules graphics adaptor frame buffer device
*
* Created 25 Nov 1999 by Ferenc Bakonyi (fero@drama.obuda.kando.hu)
* Based on skeletonfb.c by Geert Uytterhoeven and
* mdacon.c by Andrew Apted
*
* History:
*
* - Revision 0.1.8 (23 Oct 2002): Ported to new framebuffer api.
*
* - Revision 0.1.7 (23 Jan 2001): fix crash resulting from MDA only cards
* being detected as Hercules. (Paul G.)
* - Revision 0.1.6 (17 Aug 2000): new style structs
* documentation
* - Revision 0.1.5 (13 Mar 2000): spinlocks instead of saveflags();cli();etc
* minor fixes
* - Revision 0.1.4 (24 Jan 2000): fixed a bug in hga_card_detect() for
* HGA-only systems
* - Revision 0.1.3 (22 Jan 2000): modified for the new fb_info structure
* screen is cleared after rmmod
* virtual resolutions
* module parameter 'nologo={0|1}'
* the most important: boot logo :)
* - Revision 0.1.0 (6 Dec 1999): faster scrolling and minor fixes
* - First release (25 Nov 1999)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/vga.h>
#if 0
#define DPRINTK(args...) printk(KERN_DEBUG __FILE__": " ##args)
#else
#define DPRINTK(args...)
#endif
#if 0
#define CHKINFO(ret) if (info != &fb_info) { printk(KERN_DEBUG __FILE__": This should never happen, line:%d \n", __LINE__); return ret; }
#else
#define CHKINFO(ret)
#endif
/* Description of the hardware layout */
static void __iomem *hga_vram; /* Base of video memory */
static unsigned long hga_vram_len; /* Size of video memory */
#define HGA_ROWADDR(row) ((row%4)*8192 + (row>>2)*90)
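/* Video memory is split into four 8 KiB banks of 90-byte lines, so e.g.
 * rows 0..3 land at offsets 0, 8192, 16384 and 24576, while row 4 starts
 * the second line of bank 0 at offset 90. */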
#define HGA_TXT 0
#define HGA_GFX 1
static inline u8 __iomem * rowaddr(struct fb_info *info, u_int row)
{
return info->screen_base + HGA_ROWADDR(row);
}
static int hga_mode = -1; /* 0 = txt, 1 = gfx mode */
static enum { TYPE_HERC, TYPE_HERCPLUS, TYPE_HERCCOLOR } hga_type;
static char *hga_type_name;
#define HGA_INDEX_PORT 0x3b4 /* Register select port */
#define HGA_VALUE_PORT 0x3b5 /* Register value port */
#define HGA_MODE_PORT 0x3b8 /* Mode control port */
#define HGA_STATUS_PORT 0x3ba /* Status and Config port */
#define HGA_GFX_PORT 0x3bf /* Graphics control port */
/* HGA register values */
#define HGA_CURSOR_BLINKING 0x00
#define HGA_CURSOR_OFF 0x20
#define HGA_CURSOR_SLOWBLINK 0x60
#define HGA_MODE_GRAPHICS 0x02
#define HGA_MODE_VIDEO_EN 0x08
#define HGA_MODE_BLINK_EN 0x20
#define HGA_MODE_GFX_PAGE1 0x80
#define HGA_STATUS_HSYNC 0x01
#define HGA_STATUS_VSYNC 0x80
#define HGA_STATUS_VIDEO 0x08
#define HGA_CONFIG_COL132 0x08
#define HGA_GFX_MODE_EN 0x01
#define HGA_GFX_PAGE_EN 0x02
/* Global locks */
static DEFINE_SPINLOCK(hga_reg_lock);
/* Framebuffer driver structures */
static struct fb_var_screeninfo hga_default_var __devinitdata = {
.xres = 720,
.yres = 348,
.xres_virtual = 720,
.yres_virtual = 348,
.bits_per_pixel = 1,
.red = {0, 1, 0},
.green = {0, 1, 0},
.blue = {0, 1, 0},
.transp = {0, 0, 0},
.height = -1,
.width = -1,
};
static struct fb_fix_screeninfo hga_fix __devinitdata = {
.id = "HGA",
.type = FB_TYPE_PACKED_PIXELS, /* (not sure) */
.visual = FB_VISUAL_MONO10,
.xpanstep = 8,
.ypanstep = 8,
.line_length = 90,
.accel = FB_ACCEL_NONE
};
/* Don't assume that tty1 will be the initial current console. */
static int release_io_port = 0;
static int release_io_ports = 0;
static int nologo = 0;
/* -------------------------------------------------------------------------
*
* Low level hardware functions
*
* ------------------------------------------------------------------------- */
static void write_hga_b(unsigned int val, unsigned char reg)
{
outb_p(reg, HGA_INDEX_PORT);
outb_p(val, HGA_VALUE_PORT);
}
static void write_hga_w(unsigned int val, unsigned char reg)
{
outb_p(reg, HGA_INDEX_PORT); outb_p(val >> 8, HGA_VALUE_PORT);
outb_p(reg+1, HGA_INDEX_PORT); outb_p(val & 0xff, HGA_VALUE_PORT);
}
static int test_hga_b(unsigned char val, unsigned char reg)
{
outb_p(reg, HGA_INDEX_PORT);
outb (val, HGA_VALUE_PORT);
udelay(20); val = (inb_p(HGA_VALUE_PORT) == val);
return val;
}
static void hga_clear_screen(void)
{
unsigned char fillchar = 0xbf; /* sentinel: clear only if the mode is known */
unsigned long flags;
spin_lock_irqsave(&hga_reg_lock, flags);
if (hga_mode == HGA_TXT)
fillchar = ' ';
else if (hga_mode == HGA_GFX)
fillchar = 0x00;
spin_unlock_irqrestore(&hga_reg_lock, flags);
if (fillchar != 0xbf)
memset_io(hga_vram, fillchar, hga_vram_len);
}
static void hga_txt_mode(void)
{
unsigned long flags;
spin_lock_irqsave(&hga_reg_lock, flags);
outb_p(HGA_MODE_VIDEO_EN | HGA_MODE_BLINK_EN, HGA_MODE_PORT);
outb_p(0x00, HGA_GFX_PORT);
outb_p(0x00, HGA_STATUS_PORT);
write_hga_b(0x61, 0x00); /* horizontal total */
write_hga_b(0x50, 0x01); /* horizontal displayed */
write_hga_b(0x52, 0x02); /* horizontal sync pos */
write_hga_b(0x0f, 0x03); /* horizontal sync width */
write_hga_b(0x19, 0x04); /* vertical total */
write_hga_b(0x06, 0x05); /* vertical total adjust */
write_hga_b(0x19, 0x06); /* vertical displayed */
write_hga_b(0x19, 0x07); /* vertical sync pos */
write_hga_b(0x02, 0x08); /* interlace mode */
write_hga_b(0x0d, 0x09); /* maximum scanline */
write_hga_b(0x0c, 0x0a); /* cursor start */
write_hga_b(0x0d, 0x0b); /* cursor end */
write_hga_w(0x0000, 0x0c); /* start address */
write_hga_w(0x0000, 0x0e); /* cursor location */
hga_mode = HGA_TXT;
spin_unlock_irqrestore(&hga_reg_lock, flags);
}
static void hga_gfx_mode(void)
{
unsigned long flags;
spin_lock_irqsave(&hga_reg_lock, flags);
outb_p(0x00, HGA_STATUS_PORT);
outb_p(HGA_GFX_MODE_EN, HGA_GFX_PORT);
outb_p(HGA_MODE_VIDEO_EN | HGA_MODE_GRAPHICS, HGA_MODE_PORT);
write_hga_b(0x35, 0x00); /* horizontal total */
write_hga_b(0x2d, 0x01); /* horizontal displayed */
write_hga_b(0x2e, 0x02); /* horizontal sync pos */
write_hga_b(0x07, 0x03); /* horizontal sync width */
write_hga_b(0x5b, 0x04); /* vertical total */
write_hga_b(0x02, 0x05); /* vertical total adjust */
write_hga_b(0x57, 0x06); /* vertical displayed */
write_hga_b(0x57, 0x07); /* vertical sync pos */
write_hga_b(0x02, 0x08); /* interlace mode */
write_hga_b(0x03, 0x09); /* maximum scanline */
write_hga_b(0x00, 0x0a); /* cursor start */
write_hga_b(0x00, 0x0b); /* cursor end */
write_hga_w(0x0000, 0x0c); /* start address */
write_hga_w(0x0000, 0x0e); /* cursor location */
hga_mode = HGA_GFX;
spin_unlock_irqrestore(&hga_reg_lock, flags);
}
static void hga_show_logo(struct fb_info *info)
{
/*
void __iomem *dest = hga_vram;
char *logo = linux_logo_bw;
int x, y;
for (y = 134; y < 134 + 80 ; y++) * this needs some cleanup *
for (x = 0; x < 10 ; x++)
writeb(~*(logo++),(dest + HGA_ROWADDR(y) + x + 40));
*/
}
static void hga_pan(unsigned int xoffset, unsigned int yoffset)
{
unsigned int base;
unsigned long flags;
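/* each 8-line vertical step moves the CRTC start address by one 90-byte line */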
base = (yoffset / 8) * 90 + xoffset;
spin_lock_irqsave(&hga_reg_lock, flags);
write_hga_w(base, 0x0c); /* start address */
spin_unlock_irqrestore(&hga_reg_lock, flags);
DPRINTK("hga_pan: base:%d\n", base);
}
static void hga_blank(int blank_mode)
{
unsigned long flags;
spin_lock_irqsave(&hga_reg_lock, flags);
if (blank_mode) {
outb_p(0x00, HGA_MODE_PORT); /* disable video */
} else {
outb_p(HGA_MODE_VIDEO_EN | HGA_MODE_GRAPHICS, HGA_MODE_PORT);
}
spin_unlock_irqrestore(&hga_reg_lock, flags);
}
static int __devinit hga_card_detect(void)
{
int count = 0;
void __iomem *p, *q;
unsigned short p_save, q_save;
hga_vram_len = 0x08000;
hga_vram = ioremap(0xb0000, hga_vram_len);
if (request_region(0x3b0, 12, "hgafb"))
release_io_ports = 1;
if (request_region(0x3bf, 1, "hgafb"))
release_io_port = 1;
/* do a memory check */
p = hga_vram;
q = hga_vram + 0x01000;
p_save = readw(p); q_save = readw(q);
writew(0xaa55, p); if (readw(p) == 0xaa55) count++;
writew(0x55aa, p); if (readw(p) == 0x55aa) count++;
writew(p_save, p);
if (count != 2)
goto error;
/* Ok, there is definitely a card registering at the correct
* memory location, so now we do an I/O port test.
*/
if (!test_hga_b(0x66, 0x0f)) /* cursor low register */
goto error;
if (!test_hga_b(0x99, 0x0f)) /* cursor low register */
goto error;
/* See if the card is a Hercules, by checking whether the vsync
* bit of the status register is changing. This test lasts for
* approximately 1/10th of a second.
*/
p_save = q_save = inb_p(HGA_STATUS_PORT) & HGA_STATUS_VSYNC;
for (count=0; count < 50000 && p_save == q_save; count++) {
q_save = inb(HGA_STATUS_PORT) & HGA_STATUS_VSYNC;
udelay(2);
}
if (p_save == q_save)
goto error;
switch (inb_p(HGA_STATUS_PORT) & 0x70) {
case 0x10:
hga_type = TYPE_HERCPLUS;
hga_type_name = "HerculesPlus";
break;
case 0x50:
hga_type = TYPE_HERCCOLOR;
hga_type_name = "HerculesColor";
break;
default:
hga_type = TYPE_HERC;
hga_type_name = "Hercules";
break;
}
return 1;
error:
if (release_io_ports)
release_region(0x3b0, 12);
if (release_io_port)
release_region(0x3bf, 1);
return 0;
}
/**
* hgafb_open - open the framebuffer device
* @info:pointer to fb_info object containing info for current hga board
* @init:open by console system or userland.
*/
static int hgafb_open(struct fb_info *info, int init)
{
hga_gfx_mode();
hga_clear_screen();
if (!nologo) hga_show_logo(info);
return 0;
}
/**
* hgafb_release - release the framebuffer device
* @info:pointer to fb_info object containing info for current hga board
* @init:release by console system or userland.
*/
static int hgafb_release(struct fb_info *info, int init)
{
hga_txt_mode();
hga_clear_screen();
return 0;
}
/**
* hgafb_setcolreg - set color registers
* @regno:register index to set
* @red:red value, unused
* @green:green value, unused
* @blue:blue value, unused
* @transp:transparency value, unused
* @info:unused
*
* This callback function is used to set the color registers of a HGA
* board. Since we have only two fixed colors only @regno is checked.
* A zero is returned on success and 1 for failure.
*/
static int hgafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
if (regno > 1)
return 1;
return 0;
}
/**
* hgafb_pan_display - pan or wrap the display
* @var:contains new xoffset, yoffset and vmode values
* @info:pointer to fb_info object containing info for current hga board
*
* This function looks only at xoffset, yoffset and the %FB_VMODE_YWRAP
* flag in @var. If input parameters are correct it calls hga_pan() to
* program the hardware. @info->var is updated to the new values.
* A zero is returned on success and %-EINVAL for failure.
*/
static int hgafb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
if (var->vmode & FB_VMODE_YWRAP) {
if (var->yoffset < 0 ||
var->yoffset >= info->var.yres_virtual ||
var->xoffset)
return -EINVAL;
} else {
if (var->xoffset + var->xres > info->var.xres_virtual
|| var->yoffset + var->yres > info->var.yres_virtual
|| var->yoffset % 8)
return -EINVAL;
}
hga_pan(var->xoffset, var->yoffset);
return 0;
}
/**
* hgafb_blank - (un)blank the screen
* @blank_mode:blanking method to use
* @info:unused
*
* Blank the screen if blank_mode != 0, else unblank.
* Implements VESA suspend and powerdown modes on hardware that supports
* disabling hsync/vsync:
* @blank_mode == 2 means suspend vsync,
* @blank_mode == 3 means suspend hsync,
* @blank_mode == 4 means powerdown.
*/
static int hgafb_blank(int blank_mode, struct fb_info *info)
{
hga_blank(blank_mode);
return 0;
}
/*
* Accel functions
*/
static void hgafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
u_int rows, y;
u8 __iomem *dest;
y = rect->dy;
for (rows = rect->height; rows--; y++) {
dest = rowaddr(info, y) + (rect->dx >> 3);
switch (rect->rop) {
case ROP_COPY:
memset_io(dest, rect->color, (rect->width >> 3));
break;
case ROP_XOR:
fb_writeb(~(fb_readb(dest)), dest);
break;
}
}
}
static void hgafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
u_int rows, y1, y2;
u8 __iomem *src;
u8 __iomem *dest;
if (area->dy <= area->sy) {
y1 = area->sy;
y2 = area->dy;
for (rows = area->height; rows--; ) {
src = rowaddr(info, y1) + (area->sx >> 3);
dest = rowaddr(info, y2) + (area->dx >> 3);
memmove(dest, src, (area->width >> 3));
y1++;
y2++;
}
} else {
y1 = area->sy + area->height - 1;
y2 = area->dy + area->height - 1;
for (rows = area->height; rows--;) {
src = rowaddr(info, y1) + (area->sx >> 3);
dest = rowaddr(info, y2) + (area->dx >> 3);
memmove(dest, src, (area->width >> 3));
y1--;
y2--;
}
}
}
static void hgafb_imageblit(struct fb_info *info, const struct fb_image *image)
{
u8 __iomem *dest;
u8 *cdat = (u8 *) image->data;
u_int rows, y = image->dy;
u_int x;
u8 d;
for (rows = image->height; rows--; y++) {
for (x = 0; x < image->width; x+= 8) {
d = *cdat++;
dest = rowaddr(info, y) + ((image->dx + x)>> 3);
fb_writeb(d, dest);
}
}
}
static struct fb_ops hgafb_ops = {
.owner = THIS_MODULE,
.fb_open = hgafb_open,
.fb_release = hgafb_release,
.fb_setcolreg = hgafb_setcolreg,
.fb_pan_display = hgafb_pan_display,
.fb_blank = hgafb_blank,
.fb_fillrect = hgafb_fillrect,
.fb_copyarea = hgafb_copyarea,
.fb_imageblit = hgafb_imageblit,
};
/* ------------------------------------------------------------------------- *
*
* Functions in fb_info
*
* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/*
* Initialization
*/
static int __devinit hgafb_probe(struct platform_device *pdev)
{
struct fb_info *info;
if (! hga_card_detect()) {
printk(KERN_INFO "hgafb: HGA card not detected.\n");
if (hga_vram)
iounmap(hga_vram);
return -EINVAL;
}
printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
hga_type_name, hga_vram_len/1024);
info = framebuffer_alloc(0, &pdev->dev);
if (!info) {
iounmap(hga_vram);
return -ENOMEM;
}
hga_fix.smem_start = (unsigned long)hga_vram;
hga_fix.smem_len = hga_vram_len;
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
info->var = hga_default_var;
info->fix = hga_fix;
info->monspecs.hfmin = 0;
info->monspecs.hfmax = 0;
info->monspecs.vfmin = 10000;
info->monspecs.vfmax = 10000;
info->monspecs.dpms = 0;
info->fbops = &hgafb_ops;
info->screen_base = hga_vram;
if (register_framebuffer(info) < 0) {
framebuffer_release(info);
iounmap(hga_vram);
return -EINVAL;
}
printk(KERN_INFO "fb%d: %s frame buffer device\n",
info->node, info->fix.id);
platform_set_drvdata(pdev, info);
return 0;
}
static int __devexit hgafb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
hga_txt_mode();
hga_clear_screen();
if (info) {
unregister_framebuffer(info);
framebuffer_release(info);
}
iounmap(hga_vram);
if (release_io_ports)
release_region(0x3b0, 12);
if (release_io_port)
release_region(0x3bf, 1);
return 0;
}
static struct platform_driver hgafb_driver = {
.probe = hgafb_probe,
.remove = __devexit_p(hgafb_remove),
.driver = {
.name = "hgafb",
},
};
static struct platform_device *hgafb_device;
static int __init hgafb_init(void)
{
int ret;
if (fb_get_options("hgafb", NULL))
return -ENODEV;
ret = platform_driver_register(&hgafb_driver);
if (!ret) {
hgafb_device = platform_device_register_simple("hgafb", 0, NULL, 0);
if (IS_ERR(hgafb_device)) {
platform_driver_unregister(&hgafb_driver);
ret = PTR_ERR(hgafb_device);
}
}
return ret;
}
static void __exit hgafb_exit(void)
{
platform_device_unregister(hgafb_device);
platform_driver_unregister(&hgafb_driver);
}
/* -------------------------------------------------------------------------
*
* Modularization
*
* ------------------------------------------------------------------------- */
MODULE_AUTHOR("Ferenc Bakonyi (fero@drama.obuda.kando.hu)");
MODULE_DESCRIPTION("FBDev driver for Hercules Graphics Adaptor");
MODULE_LICENSE("GPL");
module_param(nologo, bool, 0);
MODULE_PARM_DESC(nologo, "Disables startup logo if != 0 (default=0)");
module_init(hgafb_init);
module_exit(hgafb_exit);
| gpl-2.0 |
poitee/LTEOMAPNEX | drivers/mtd/chips/cfi_cmdset_0001.c | 3026 | 74188 | /*
* Common Flash Interface support:
* Intel Extended Vendor Command Set (ID 0x0001)
*
* (C) 2000 Red Hat. GPL'd
*
*
* 10/10/2000 Nicolas Pitre <nico@fluxnic.net>
* - completely revamped method functions so they are aware and
* independent of the flash geometry (buswidth, interleave, etc.)
* - scalability vs code size is completely set at compile-time
* (see include/linux/mtd/cfi.h for selection)
* - optimized write buffer method
* 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
* - reworked lock/unlock/erase support for var size flash
* 21/03/2007 Rodolfo Giometti <giometti@linux.it>
* - auto unlock sectors on resume for auto locking flash on power up
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0
/* Intel chips */
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define PF38F4476 0x881c
/* STMicroelectronics chips */
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
#define M50FLW080B 0x0081
/* Atmel chips */
#define AT49BV640D 0x02de
#define AT49BV640DT 0x02db
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
static void cfi_intelext_destroy(struct mtd_info *);
struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
/*
* *********** SETUP AND PROBE BITS ***********
*/
static struct mtd_chip_driver cfi_intelext_chipdrv = {
.probe = NULL, /* Not usable directly */
.destroy = cfi_intelext_destroy,
.name = "cfi_cmdset_0001",
.module = THIS_MODULE
};
/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
int i;
printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
for (i=11; i<32; i++) {
if (extp->FeatureSupport & (1<<i))
printk(" - Unknown Bit %X: supported\n", i);
}
printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
for (i=1; i<8; i++) {
if (extp->SuspendCmdSupport & (1<<i))
printk(" - Unknown Bit %X: supported\n", i);
}
printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
for (i=2; i<3; i++) {
if (extp->BlkStatusRegMask & (1<<i))
printk(" - Unknown Bit %X Active: yes\n",i);
}
printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
for (i=6; i<16; i++) {
if (extp->BlkStatusRegMask & (1<<i))
printk(" - Unknown Bit %X Active: yes\n",i);
}
printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
if (extp->VppOptimal)
printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
struct cfi_pri_atmel atmel_pri;
uint32_t features = 0;
/* Reverse byteswapping */
extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
memcpy(&atmel_pri, extp, sizeof(atmel_pri));
memset((char *)extp + 5, 0, sizeof(*extp) - 5);
printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
if (atmel_pri.Features & 0x01) /* chip erase supported */
features |= (1<<0);
if (atmel_pri.Features & 0x02) /* erase suspend supported */
features |= (1<<1);
if (atmel_pri.Features & 0x04) /* program suspend supported */
features |= (1<<2);
if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
features |= (1<<9);
if (atmel_pri.Features & 0x20) /* page mode read supported */
features |= (1<<7);
if (atmel_pri.Features & 0x40) /* queued erase supported */
features |= (1<<4);
if (atmel_pri.Features & 0x80) /* Protection bits supported */
features |= (1<<6);
extp->FeatureSupport = features;
/* burst write mode not supported */
cfi->cfiq->BufWriteTimeoutTyp = 0;
cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
cfip->FeatureSupport |= (1 << 5);
mtd->flags |= MTD_POWERUP_LOCK;
}
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
"erase on write disabled.\n");
extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
if (cfip && (cfip->FeatureSupport&4)) {
cfip->FeatureSupport &= ~4;
printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
}
}
#endif
static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
}
static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
/* Note this is done after the region info is endian swapped */
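/* keep the block-size half of the word but force the block-count field to 0x3e, i.e. 63 blocks */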
cfi->cfiq->EraseRegionInfo[1] =
(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
};
static void fixup_use_point(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
if (!mtd->point && map_is_linear(map)) {
mtd->point = cfi_intelext_point;
mtd->unpoint = cfi_intelext_unpoint;
}
}
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
if (cfi->cfiq->BufWriteTimeoutTyp) {
printk(KERN_INFO "Using buffer write method\n" );
mtd->write = cfi_intelext_write_buffers;
mtd->writev = cfi_intelext_writev;
}
}
/*
* Some chips power-up with all sectors locked by default.
*/
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
if (cfip->FeatureSupport&32) {
printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
mtd->flags |= MTD_POWERUP_LOCK;
}
}
static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
{ CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
{ CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
{ CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
{ CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
{ CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
{ 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
/* The CFI vendor IDs and the JEDEC vendor IDs appear
* to be common, and it looks like the device IDs are
* as well. This table picks up all cases where we
* know that is the case.
*/
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
{ 0, 0, NULL }
};
static void cfi_fixup_major_minor(struct cfi_private *cfi,
struct cfi_pri_intelext *extp)
{
if (cfi->mfr == CFI_MFR_INTEL &&
cfi->id == PF38F4476 && extp->MinorVersion == '3')
extp->MinorVersion = '1';
}
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp;
unsigned int extra_size = 0;
unsigned int extp_size = sizeof(*extp);
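/*
 * Read the extended query table, then, if the variable-length tail turns
 * out to be larger than what was fetched, grow extp_size and read again.
 */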
again:
extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
if (!extp)
return NULL;
cfi_fixup_major_minor(cfi, extp);
if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
"version %c.%c.\n", extp->MajorVersion,
extp->MinorVersion);
kfree(extp);
return NULL;
}
/* Do some byteswapping if necessary */
extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
if (extp->MinorVersion >= '0') {
extra_size = 0;
/* Protection Register info */
extra_size += (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
}
if (extp->MinorVersion >= '1') {
/* Burst Read info */
extra_size += 2;
if (extp_size < sizeof(*extp) + extra_size)
goto need_more;
extra_size += extp->extra[extra_size - 1];
}
if (extp->MinorVersion >= '3') {
int nb_parts, i;
/* Number of hardware-partitions */
extra_size += 1;
if (extp_size < sizeof(*extp) + extra_size)
goto need_more;
nb_parts = extp->extra[extra_size - 1];
/* skip the sizeof(partregion) field in CFI 1.4 */
if (extp->MinorVersion >= '4')
extra_size += 2;
for (i = 0; i < nb_parts; i++) {
struct cfi_intelext_regioninfo *rinfo;
rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
extra_size += sizeof(*rinfo);
if (extp_size < sizeof(*extp) + extra_size)
goto need_more;
rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
extra_size += (rinfo->NumBlockTypes - 1)
* sizeof(struct cfi_intelext_blockinfo);
}
if (extp->MinorVersion >= '4')
extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
if (extp_size < sizeof(*extp) + extra_size) {
need_more:
extp_size = sizeof(*extp) + extra_size;
kfree(extp);
if (extp_size > 4096) {
printk(KERN_ERR
"%s: cfi_pri_intelext is too fat\n",
__func__);
return NULL;
}
goto again;
}
}
return extp;
}
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
struct mtd_info *mtd;
int i;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
if (!mtd) {
printk(KERN_ERR "Failed to allocate memory for MTD device\n");
return NULL;
}
mtd->priv = map;
mtd->type = MTD_NORFLASH;
/* Fill in the default mtd operations */
mtd->erase = cfi_intelext_erase_varsize;
mtd->read = cfi_intelext_read;
mtd->write = cfi_intelext_write_words;
mtd->sync = cfi_intelext_sync;
mtd->lock = cfi_intelext_lock;
mtd->unlock = cfi_intelext_unlock;
mtd->is_locked = cfi_intelext_is_locked;
mtd->suspend = cfi_intelext_suspend;
mtd->resume = cfi_intelext_resume;
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
if (cfi->cfi_mode == CFI_MODE_CFI) {
/*
* It's a real CFI chip, not one for which the probe
* routine faked a CFI structure. So we read the feature
* table from it.
*/
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
struct cfi_pri_intelext *extp;
extp = read_pri_intelext(map, adr);
if (!extp) {
kfree(mtd);
return NULL;
}
/* Install our own private info structure */
cfi->cmdset_priv = extp;
cfi_fixup(mtd, cfi_fixup_table);
#ifdef DEBUG_CFI_FEATURES
/* Tell the user about it in lots of lovely detail */
cfi_tell_features(extp);
#endif
if(extp->SuspendCmdSupport & 1) {
printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
}
}
else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
/* Apply jedec specific fixups */
cfi_fixup(mtd, jedec_fixup_table);
}
/* Apply generic fixups */
cfi_fixup(mtd, fixup_table);
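/*
 * CFI encodes typical/maximum program and erase times as powers of two:
 * word/buffer writes in microseconds and block erases in milliseconds
 * (hence the 1000<< below). Fall back to conservative defaults when a
 * field is zero.
 */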
for (i=0; i< cfi->numchips; i++) {
if (cfi->cfiq->WordWriteTimeoutTyp)
cfi->chips[i].word_write_time =
1<<cfi->cfiq->WordWriteTimeoutTyp;
else
cfi->chips[i].word_write_time = 50000;
if (cfi->cfiq->BufWriteTimeoutTyp)
cfi->chips[i].buffer_write_time =
1<<cfi->cfiq->BufWriteTimeoutTyp;
/* No default; if it isn't specified, we won't use it */
if (cfi->cfiq->BlockEraseTimeoutTyp)
cfi->chips[i].erase_time =
1000<<cfi->cfiq->BlockEraseTimeoutTyp;
else
cfi->chips[i].erase_time = 2000000;
if (cfi->cfiq->WordWriteTimeoutTyp &&
cfi->cfiq->WordWriteTimeoutMax)
cfi->chips[i].word_write_time_max =
1<<(cfi->cfiq->WordWriteTimeoutTyp +
cfi->cfiq->WordWriteTimeoutMax);
else
cfi->chips[i].word_write_time_max = 50000 * 8;
if (cfi->cfiq->BufWriteTimeoutTyp &&
cfi->cfiq->BufWriteTimeoutMax)
cfi->chips[i].buffer_write_time_max =
1<<(cfi->cfiq->BufWriteTimeoutTyp +
cfi->cfiq->BufWriteTimeoutMax);
if (cfi->cfiq->BlockEraseTimeoutTyp &&
cfi->cfiq->BlockEraseTimeoutMax)
cfi->chips[i].erase_time_max =
1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
cfi->cfiq->BlockEraseTimeoutMax);
else
cfi->chips[i].erase_time_max = 2000000 * 8;
cfi->chips[i].ref_point_counter = 0;
init_waitqueue_head(&(cfi->chips[i].wq));
}
map->fldrv = &cfi_intelext_chipdrv;
return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long offset = 0;
int i,j;
unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
mtd->size = devsize * cfi->numchips;
mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
* mtd->numeraseregions, GFP_KERNEL);
if (!mtd->eraseregions) {
printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
goto setup_err;
}
for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
unsigned long ernum, ersize;
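/* EraseRegionInfo packs each region as: bits 31..16 = block size / 256, bits 15..0 = number of blocks - 1 */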
ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
if (mtd->erasesize < ersize) {
mtd->erasesize = ersize;
}
for (j=0; j<cfi->numchips; j++) {
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
}
offset += (ersize * ernum);
}
if (offset != devsize) {
/* Argh */
printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
goto setup_err;
}
for (i=0; i<mtd->numeraseregions;i++){
printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
i,(unsigned long long)mtd->eraseregions[i].offset,
mtd->eraseregions[i].erasesize,
mtd->eraseregions[i].numblocks);
}
#ifdef CONFIG_MTD_OTP
mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif
/* This function has the potential to distort the reality
a bit and therefore should be called last. */
if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
goto setup_err;
__module_get(THIS_MODULE);
register_reboot_notifier(&mtd->reboot_notifier);
return mtd;
setup_err:
kfree(mtd->eraseregions);
kfree(mtd);
kfree(cfi->cmdset_priv);
return NULL;
}
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
struct cfi_private **pcfi)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = *pcfi;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
/*
* Probing of multi-partition flash chips.
*
* To support multiple partitions when available, we simply arrange
* for each of them to have their own flchip structure even if they
* are on the same physical chip. This means completely recreating
* a new cfi_private structure right here which is a blatant code
* layering violation, but this is still the least intrusive
* arrangement at this point. This can be rearranged in the future
* if someone feels motivated enough. --nico
*/
if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
&& extp->FeatureSupport & (1 << 9)) {
struct cfi_private *newcfi;
struct flchip *chip;
struct flchip_shared *shared;
int offs, numregions, numparts, partshift, numvirtchips, i, j;
/* Protection Register info */
offs = (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
/* Burst Read info */
offs += extp->extra[offs+1]+2;
/* Number of partition regions */
numregions = extp->extra[offs];
offs += 1;
/* skip the sizeof(partregion) field in CFI 1.4 */
if (extp->MinorVersion >= '4')
offs += 2;
/* Number of hardware partitions */
numparts = 0;
for (i = 0; i < numregions; i++) {
struct cfi_intelext_regioninfo *rinfo;
rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
numparts += rinfo->NumIdentPartitions;
offs += sizeof(*rinfo)
+ (rinfo->NumBlockTypes - 1) *
sizeof(struct cfi_intelext_blockinfo);
}
if (!numparts)
numparts = 1;
/* Programming Region info */
if (extp->MinorVersion >= '4') {
struct cfi_intelext_programming_regioninfo *prinfo;
prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
mtd->flags &= ~MTD_BIT_WRITEABLE;
printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
map->name, mtd->writesize,
cfi->interleave * prinfo->ControlValid,
cfi->interleave * prinfo->ControlInvalid);
}
/*
* All functions below currently rely on all chips having
* the same geometry so we'll just assume that all hardware
* partitions are of the same size too.
*/
partshift = cfi->chipshift - __ffs(numparts);
if ((1 << partshift) < mtd->erasesize) {
printk( KERN_ERR
"%s: bad number of hw partitions (%d)\n",
__func__, numparts);
return -EINVAL;
}
numvirtchips = cfi->numchips * numparts;
newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
if (!newcfi)
return -ENOMEM;
shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
if (!shared) {
kfree(newcfi);
return -ENOMEM;
}
memcpy(newcfi, cfi, sizeof(struct cfi_private));
newcfi->numchips = numvirtchips;
newcfi->chipshift = partshift;
chip = &newcfi->chips[0];
for (i = 0; i < cfi->numchips; i++) {
shared[i].writing = shared[i].erasing = NULL;
mutex_init(&shared[i].lock);
for (j = 0; j < numparts; j++) {
*chip = cfi->chips[i];
chip->start += j << partshift;
chip->priv = &shared[i];
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
mutex_init(&chip->mutex);
chip++;
}
}
printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
"--> %d partitions of %d KiB\n",
map->name, cfi->numchips, cfi->interleave,
newcfi->numchips, 1<<(newcfi->chipshift-10));
map->fldrv_priv = newcfi;
*pcfi = newcfi;
kfree(cfi);
}
return 0;
}
/*
* *********** CHIP ACCESS FUNCTIONS ***********
*/
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
DECLARE_WAITQUEUE(wait, current);
struct cfi_private *cfi = map->fldrv_priv;
map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
unsigned long timeo = jiffies + HZ;
/* Prevent setting state FL_SYNCING for chip in suspended state. */
if (mode == FL_SYNCING && chip->oldstate != FL_READY)
goto sleep;
switch (chip->state) {
case FL_STATUS:
for (;;) {
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
/* At this point we're fine with write operations
in other partitions as they don't conflict. */
if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
break;
mutex_unlock(&chip->mutex);
cfi_udelay(1);
mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
return -EAGAIN;
}
/* Fall through */
case FL_READY:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
return 0;
case FL_ERASING:
if (!cfip ||
!(cfip->FeatureSupport & 2) ||
!(mode == FL_READY || mode == FL_POINT ||
(mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
goto sleep;
/* Erase suspend */
map_write(map, CMD(0xB0), adr);
/* If the flash has finished erasing, then 'erase suspend'
* appears to make some (28F320) flash devices switch to
* 'read' mode. Make sure that we switch to 'read status'
* mode so we get the right data. --rmk
*/
map_write(map, CMD(0x70), adr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
chip->erase_suspended = 1;
for (;;) {
status = map_read(map, adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
if (time_after(jiffies, timeo)) {
/* Urgh. Resume and pretend we weren't here.
* Make sure we're in 'read status' mode if it had finished */
put_chip(map, chip, adr);
printk(KERN_ERR "%s: Chip not ready after erase "
"suspended: status = 0x%lx\n", map->name, status.x[0]);
return -EIO;
}
mutex_unlock(&chip->mutex);
cfi_udelay(1);
mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
chip->state = FL_STATUS;
return 0;
case FL_XIP_WHILE_ERASING:
if (mode != FL_READY && mode != FL_POINT &&
(mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
goto sleep;
chip->oldstate = chip->state;
chip->state = FL_READY;
return 0;
case FL_SHUTDOWN:
/* The machine is rebooting now, so no one can get the chip anymore */
return -EIO;
case FL_POINT:
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
return 0;
/* Fall through */
default:
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
int ret;
DECLARE_WAITQUEUE(wait, current);
retry:
if (chip->priv &&
(mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
|| mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
/*
* OK. We have possibility for contention on the write/erase
* operations which are global to the real chip and not per
* partition. So let's fight it over in the partition which
* currently has authority on the operation.
*
* The rules are as follows:
*
* - any write operation must own shared->writing.
*
* - any erase operation must own _both_ shared->writing and
* shared->erasing.
*
* - contention arbitration is handled in the owner's context.
*
* The 'shared' struct can be read and/or written only when
* its lock is taken.
*/
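/*
 * Example of how the tokens flow: a plain write takes only shared->writing;
 * an erase takes both tokens, so when a write on another partition borrows
 * shared->writing from a suspended erase, put_chip() later hands it back to
 * the erasing chip (see the "loaner" handling there).
 */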
struct flchip_shared *shared = chip->priv;
struct flchip *contender;
mutex_lock(&shared->lock);
contender = shared->writing;
if (contender && contender != chip) {
/*
* The engine to perform desired operation on this
* partition is already in use by someone else.
* Let's fight over it in the context of the chip
* currently using it. If it is possible to suspend,
* that other partition will do just that, otherwise
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
ret = mutex_trylock(&contender->mutex);
mutex_unlock(&shared->lock);
if (!ret)
goto retry;
mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, contender->start, mode);
mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
mutex_unlock(&contender->mutex);
return ret;
}
mutex_lock(&shared->lock);
/* We should not own chip if it is already
* in FL_SYNCING state. Put contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender, contender->start);
mutex_unlock(&contender->mutex);
goto retry;
}
mutex_unlock(&contender->mutex);
}
/* Check if we already have suspended erase
* on this chip. Sleep. */
if (mode == FL_ERASING && shared->erasing
&& shared->erasing->oldstate == FL_ERASING) {
mutex_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
goto retry;
}
/* We now own it */
shared->writing = chip;
if (mode == FL_ERASING)
shared->erasing = chip;
mutex_unlock(&shared->lock);
}
ret = chip_ready(map, chip, adr, mode);
if (ret == -EAGAIN)
goto retry;
return ret;
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
if (chip->priv) {
struct flchip_shared *shared = chip->priv;
mutex_lock(&shared->lock);
if (shared->writing == chip && chip->oldstate == FL_READY) {
/* We own the ability to write, but we're done */
shared->writing = shared->erasing;
if (shared->writing && shared->writing != chip) {
/* give back ownership to who we loaned it from */
struct flchip *loaner = shared->writing;
mutex_lock(&loaner->mutex);
mutex_unlock(&shared->lock);
mutex_unlock(&chip->mutex);
put_chip(map, loaner, loaner->start);
mutex_lock(&chip->mutex);
mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
shared->erasing = NULL;
shared->writing = NULL;
} else if (shared->erasing == chip && shared->writing != chip) {
/*
* We own the ability to erase without the ability
* to write, which means the erase was suspended
* and some other partition is currently writing.
* Don't let the switch below mess things up since
* we don't have ownership to resume anything.
*/
mutex_unlock(&shared->lock);
wake_up(&chip->wq);
return;
}
mutex_unlock(&shared->lock);
}
switch(chip->oldstate) {
case FL_ERASING:
/* What if one interleaved chip has finished and the
other hasn't? The old code would leave the finished
one in READY mode. That's bad, and caused -EROFS
errors to be returned from do_erase_oneblock because
that's the only bit it checked for at the time.
As the state machine appears to explicitly allow
sending the 0x70 (Read Status) command to an erasing
chip and expecting it to be ignored, that's what we
do. */
map_write(map, CMD(0xd0), adr);
map_write(map, CMD(0x70), adr);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
case FL_XIP_WHILE_ERASING:
chip->state = chip->oldstate;
chip->oldstate = FL_READY;
break;
case FL_READY:
case FL_STATUS:
case FL_JEDEC_QUERY:
/* We should really make set_vpp() count, rather than doing this */
DISABLE_VPP(map);
break;
default:
printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
}
wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP
/*
* No interrupt whatsoever can be serviced while the flash isn't in array
* mode. This is ensured by the xip_disable() and xip_enable() functions
* enclosing any code path where the flash is known not to be in array mode.
* And within a XIP disabled code path, only functions marked with __xipram
* may be called and nothing else (it's a good thing to inspect generated
* assembly to make sure inline functions were actually inlined and that gcc
* didn't emit calls to its own support functions). Configuring MTD CFI
* support to a single buswidth and a single interleave is also recommended.
*/
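/*
* Minimal sketch (editorial example, not part of the driver) of the
* bracketing pattern described above: any code that may take the flash out
* of array mode runs between xip_disable() and xip_enable() and must itself
* live in __xipram. The function name below is hypothetical.
*/
#if 0 /* example only */
static void __xipram example_status_poll(struct map_info *map,
struct flchip *chip, unsigned long adr)
{
xip_disable(map, chip, adr); /* flash may leave array mode; IRQs masked */
map_write(map, CMD(0x70), adr); /* e.g. issue Read Status Register */
(void) map_read(map, adr);
xip_enable(map, chip, adr); /* back to array mode; IRQs enabled again */
}
#endif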
static void xip_disable(struct map_info *map, struct flchip *chip,
unsigned long adr)
{
/* TODO: chips with no XIP use should ignore and return */
(void) map_read(map, adr); /* ensure mmu mapping is up to date */
local_irq_disable();
}
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
unsigned long adr)
{
struct cfi_private *cfi = map->fldrv_priv;
if (chip->state != FL_POINT && chip->state != FL_READY) {
map_write(map, CMD(0xff), adr);
chip->state = FL_READY;
}
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
}
/*
* When a delay is required for the flash operation to complete, the
* xip_wait_for_operation() function is polling for both the given timeout
* and pending (but still masked) hardware interrupts. Whenever there is an
* interrupt pending then the flash erase or write operation is suspended,
* array mode restored and interrupts unmasked. Task scheduling might also
* happen at that point. The CPU eventually returns from the interrupt or
* the call to schedule() and the suspended flash operation is resumed for
* the remainder of the delay period.
*
* Warning: this function _will_ fool interrupt latency tracing tools.
*/
static int __xipram xip_wait_for_operation(
struct map_info *map, struct flchip *chip,
unsigned long adr, unsigned int chip_op_time_max)
{
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
map_word status, OK = CMD(0x80);
unsigned long usec, suspended, start, done;
flstate_t oldstate, newstate;
start = xip_currtime();
usec = chip_op_time_max;
if (usec == 0)
usec = 500000;
done = 0;
do {
cpu_relax();
if (xip_irqpending() && cfip &&
((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
(chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
(cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
/*
* Let's suspend the erase or write operation when
* supported. Note that we currently don't try to
* suspend interleaved chips if there is already
* another operation suspended (imagine what happens
* when one chip was already done with the current
* operation while another chip suspended it, then
* we resume the whole thing at once). Yes, it
* can happen!
*/
usec -= done;
map_write(map, CMD(0xb0), adr);
map_write(map, CMD(0x70), adr);
suspended = xip_currtime();
do {
if (xip_elapsed_since(suspended) > 100000) {
/*
* The chip doesn't want to suspend
* after waiting for 100 msecs.
* This is a critical error but there
* is not much we can do here.
*/
return -EIO;
}
status = map_read(map, adr);
} while (!map_word_andequal(map, status, OK, OK));
/* Suspend succeeded */
oldstate = chip->state;
if (oldstate == FL_ERASING) {
if (!map_word_bitsset(map, status, CMD(0x40)))
break;
newstate = FL_XIP_WHILE_ERASING;
chip->erase_suspended = 1;
} else {
if (!map_word_bitsset(map, status, CMD(0x04)))
break;
newstate = FL_XIP_WHILE_WRITING;
chip->write_suspended = 1;
}
chip->state = newstate;
map_write(map, CMD(0xff), adr);
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
/*
* We're back. However someone else might have
* decided to go write to the chip if we are in
* a suspended erase state. If so let's wait
* until it's done.
*/
mutex_lock(&chip->mutex);
while (chip->state != newstate) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
/* Resume the write or erase operation */
map_write(map, CMD(0xd0), adr);
map_write(map, CMD(0x70), adr);
chip->state = oldstate;
start = xip_currtime();
} else if (usec >= 1000000/HZ) {
/*
* Try to save on CPU power when waiting delay
* is at least a system timer tick period.
* No need to be extremely accurate here.
*/
xip_cpu_idle();
}
status = map_read(map, adr);
done = xip_elapsed_since(start);
} while (!map_word_andequal(map, status, OK, OK)
&& done < usec);
return (done >= usec) ? -ETIME : 0;
}
/*
* The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
* the flash is actively programming or erasing since we have to poll for
* the operation to complete anyway. We can't do that in a generic way with
* a XIP setup so do it before the actual flash operation in this case
* and stub it out from INVAL_CACHE_AND_WAIT.
*/
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
INVALIDATE_CACHED_RANGE(map, from, size)
#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
xip_wait_for_operation(map, chip, cmd_adr, usec_max)
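/*
* Summary (editorial note): under CONFIG_MTD_XIP a call such as
* INVAL_CACHE_AND_WAIT(map, chip, adr, adr, len, t, t_max) ignores the
* inval_adr, inval_len and t arguments and expands to
* xip_wait_for_operation(map, chip, adr, t_max); the cache invalidation
* must already have been done up front via XIP_INVAL_CACHED_RANGE().
* Without XIP it maps to inval_cache_and_wait_for_operation() below,
* which invalidates the cache and polls for completion in one place.
*/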
#else
#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
static int inval_cache_and_wait_for_operation(
struct map_info *map, struct flchip *chip,
unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
unsigned int chip_op_time, unsigned int chip_op_time_max)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, status_OK = CMD(0x80);
int chip_state = chip->state;
unsigned int timeo, sleep_time, reset_timeo;
mutex_unlock(&chip->mutex);
if (inval_len)
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
mutex_lock(&chip->mutex);
timeo = chip_op_time_max;
if (!timeo)
timeo = 500000;
reset_timeo = timeo;
sleep_time = chip_op_time / 2;
for (;;) {
if (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
continue;
}
status = map_read(map, cmd_adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
if (chip->erase_suspended && chip_state == FL_ERASING) {
/* Erase suspend occurred while sleeping: reset timeout */
timeo = reset_timeo;
chip->erase_suspended = 0;
}
if (chip->write_suspended && chip_state == FL_WRITING) {
/* Write suspend occurred while sleeping: reset timeout */
timeo = reset_timeo;
chip->write_suspended = 0;
}
if (!timeo) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
return -ETIME;
}
/* OK Still waiting. Drop the lock, wait a while and retry. */
mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
* can be performed with a sleeping delay instead
* of busy waiting.
*/
msleep(sleep_time/1000);
timeo -= sleep_time;
sleep_time = 1000000/HZ;
} else {
udelay(1);
cond_resched();
timeo--;
}
mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
return 0;
}
#endif
#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
unsigned long cmd_addr;
struct cfi_private *cfi = map->fldrv_priv;
int ret = 0;
adr += chip->start;
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_POINT);
if (!ret) {
if (chip->state != FL_POINT && chip->state != FL_READY)
map_write(map, CMD(0xff), cmd_addr);
chip->state = FL_POINT;
chip->ref_point_counter++;
}
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs, last_end = 0;
int chipnum;
int ret = 0;
if (!map->virt || (from + len > mtd->size))
return -EINVAL;
/* Now lock the chip(s) to POINT state */
/* ofs: offset within the first chip that the first read should start */
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
*virt = map->virt + cfi->chips[chipnum].start + ofs;
*retlen = 0;
if (phys)
*phys = map->phys + cfi->chips[chipnum].start + ofs;
while (len) {
unsigned long thislen;
if (chipnum >= cfi->numchips)
break;
/* We cannot point across chips that are virtually disjoint */
if (!last_end)
last_end = cfi->chips[chipnum].start;
else if (cfi->chips[chipnum].start != last_end)
break;
if ((len + ofs -1) >> cfi->chipshift)
thislen = (1<<cfi->chipshift) - ofs;
else
thislen = len;
ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
if (ret)
break;
*retlen += thislen;
len -= thislen;
ofs = 0;
last_end += 1 << cfi->chipshift;
chipnum++;
}
return 0;
}
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs;
int chipnum;
/* Now unlock the chip(s) POINT state */
/* ofs: offset within the first chip that the first read should start */
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
while (len) {
unsigned long thislen;
struct flchip *chip;
chip = &cfi->chips[chipnum];
if (chipnum >= cfi->numchips)
break;
if ((len + ofs -1) >> cfi->chipshift)
thislen = (1<<cfi->chipshift) - ofs;
else
thislen = len;
mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if(chip->ref_point_counter == 0)
chip->state = FL_READY;
} else
printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
put_chip(map, chip, chip->start);
mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
chipnum++;
}
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
unsigned long cmd_addr;
struct cfi_private *cfi = map->fldrv_priv;
int ret;
adr += chip->start;
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
if (chip->state != FL_POINT && chip->state != FL_READY) {
map_write(map, CMD(0xff), cmd_addr);
chip->state = FL_READY;
}
map_copy_from(map, buf, adr, len);
put_chip(map, chip, cmd_addr);
mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
unsigned long ofs;
int chipnum;
int ret = 0;
/* ofs: offset within the first chip that the first read should start */
chipnum = (from >> cfi->chipshift);
ofs = from - (chipnum << cfi->chipshift);
*retlen = 0;
while (len) {
unsigned long thislen;
if (chipnum >= cfi->numchips)
break;
if ((len + ofs -1) >> cfi->chipshift)
thislen = (1<<cfi->chipshift) - ofs;
else
thislen = len;
ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
if (ret)
break;
*retlen += thislen;
len -= thislen;
buf += thislen;
ofs = 0;
chipnum++;
}
return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
unsigned long adr, map_word datum, int mode)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, write_cmd;
int ret=0;
adr += chip->start;
switch (mode) {
case FL_WRITING:
write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
break;
case FL_OTP_WRITE:
write_cmd = CMD(0xc0);
break;
default:
return -EINVAL;
}
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, mode);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
ENABLE_VPP(map);
xip_disable(map, chip, adr);
map_write(map, write_cmd, adr);
map_write(map, datum, adr);
chip->state = mode;
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, map_bankwidth(map),
chip->word_write_time,
chip->word_write_time_max);
if (ret) {
xip_enable(map, chip, adr);
printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
goto out;
}
/* check for errors */
status = map_read(map, adr);
if (map_word_bitsset(map, status, CMD(0x1a))) {
unsigned long chipstatus = MERGESTATUS(status);
/* reset status */
map_write(map, CMD(0x50), adr);
map_write(map, CMD(0x70), adr);
xip_enable(map, chip, adr);
if (chipstatus & 0x02) {
ret = -EROFS;
} else if (chipstatus & 0x08) {
printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
ret = -EIO;
} else {
printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
ret = -EINVAL;
}
goto out;
}
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int ret = 0;
int chipnum;
unsigned long ofs;
*retlen = 0;
if (!len)
return 0;
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
/* If it's not bus-aligned, do the first byte write */
if (ofs & (map_bankwidth(map)-1)) {
unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
int gap = ofs - bus_ofs;
int n;
map_word datum;
n = min_t(int, len, map_bankwidth(map)-gap);
datum = map_word_ff(map);
datum = map_word_load_partial(map, datum, buf, gap, n);
ret = do_write_oneword(map, &cfi->chips[chipnum],
bus_ofs, datum, FL_WRITING);
if (ret)
return ret;
len -= n;
ofs += n;
buf += n;
(*retlen) += n;
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
while(len >= map_bankwidth(map)) {
map_word datum = map_word_load(map, buf);
ret = do_write_oneword(map, &cfi->chips[chipnum],
ofs, datum, FL_WRITING);
if (ret)
return ret;
ofs += map_bankwidth(map);
buf += map_bankwidth(map);
(*retlen) += map_bankwidth(map);
len -= map_bankwidth(map);
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
if (len & (map_bankwidth(map)-1)) {
map_word datum;
datum = map_word_ff(map);
datum = map_word_load_partial(map, datum, buf, 0, len);
ret = do_write_oneword(map, &cfi->chips[chipnum],
ofs, datum, FL_WRITING);
if (ret)
return ret;
(*retlen) += len;
}
return 0;
}
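/*
* Worked example (editorial, assuming map_bankwidth(map) == 4): writing
* 10 bytes to offset 0x103 results in four do_write_oneword() calls: an
* unaligned head word at bus offset 0x100 (gap 3, one data byte, the rest
* padded with 0xff), two aligned words at 0x104 and 0x108, and a tail word
* at 0x10c carrying the last byte, again padded with 0xff.
*/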
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
unsigned long adr, const struct kvec **pvec,
unsigned long *pvec_seek, int len)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, write_cmd, datum;
unsigned long cmd_adr;
int ret, wbufsize, word_gap, words;
const struct kvec *vec;
unsigned long vec_seek;
unsigned long initial_adr;
int initial_len = len;
wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
adr += chip->start;
initial_adr = adr;
cmd_adr = adr & ~(wbufsize-1);
/* Let's determine this according to the interleave only once */
write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_adr, FL_WRITING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
ENABLE_VPP(map);
xip_disable(map, chip, cmd_adr);
/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
[...], the device will not accept any more Write to Buffer commands".
So we must check here and reset those bits if they're set. Otherwise
we're just pissing in the wind */
if (chip->state != FL_STATUS) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
}
status = map_read(map, cmd_adr);
if (map_word_bitsset(map, status, CMD(0x30))) {
xip_enable(map, chip, cmd_adr);
printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
xip_disable(map, chip, cmd_adr);
map_write(map, CMD(0x50), cmd_adr);
map_write(map, CMD(0x70), cmd_adr);
}
chip->state = FL_WRITING_TO_BUFFER;
map_write(map, write_cmd, cmd_adr);
ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
if (ret) {
/* Argh. Not ready for write to buffer */
map_word Xstatus = map_read(map, cmd_adr);
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
status = map_read(map, cmd_adr);
map_write(map, CMD(0x50), cmd_adr);
map_write(map, CMD(0x70), cmd_adr);
xip_enable(map, chip, cmd_adr);
printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
map->name, Xstatus.x[0], status.x[0]);
goto out;
}
/* Figure out the number of words to write */
word_gap = (-adr & (map_bankwidth(map)-1));
words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
if (!word_gap) {
words--;
} else {
word_gap = map_bankwidth(map) - word_gap;
adr -= word_gap;
datum = map_word_ff(map);
}
/* Write length of data to come */
map_write(map, CMD(words), cmd_adr );
/* Write data */
vec = *pvec;
vec_seek = *pvec_seek;
do {
int n = map_bankwidth(map) - word_gap;
if (n > vec->iov_len - vec_seek)
n = vec->iov_len - vec_seek;
if (n > len)
n = len;
if (!word_gap && len < map_bankwidth(map))
datum = map_word_ff(map);
datum = map_word_load_partial(map, datum,
vec->iov_base + vec_seek,
word_gap, n);
len -= n;
word_gap += n;
if (!len || word_gap == map_bankwidth(map)) {
map_write(map, datum, adr);
adr += map_bankwidth(map);
word_gap = 0;
}
vec_seek += n;
if (vec_seek == vec->iov_len) {
vec++;
vec_seek = 0;
}
} while (len);
*pvec = vec;
*pvec_seek = vec_seek;
/* GO GO GO */
map_write(map, CMD(0xd0), cmd_adr);
chip->state = FL_WRITING;
ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
initial_adr, initial_len,
chip->buffer_write_time,
chip->buffer_write_time_max);
if (ret) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
xip_enable(map, chip, cmd_adr);
printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
goto out;
}
/* check for errors */
status = map_read(map, cmd_adr);
if (map_word_bitsset(map, status, CMD(0x1a))) {
unsigned long chipstatus = MERGESTATUS(status);
/* reset status */
map_write(map, CMD(0x50), cmd_adr);
map_write(map, CMD(0x70), cmd_adr);
xip_enable(map, chip, cmd_adr);
if (chipstatus & 0x02) {
ret = -EROFS;
} else if (chipstatus & 0x08) {
printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
ret = -EIO;
} else {
printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
ret = -EINVAL;
}
goto out;
}
xip_enable(map, chip, cmd_adr);
out: put_chip(map, chip, cmd_adr);
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
int ret = 0;
int chipnum;
unsigned long ofs, vec_seek, i;
size_t len = 0;
for (i = 0; i < count; i++)
len += vecs[i].iov_len;
*retlen = 0;
if (!len)
return 0;
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
vec_seek = 0;
do {
/* We must not cross write block boundaries */
int size = wbufsize - (ofs & (wbufsize-1));
if (size > len)
size = len;
ret = do_write_buffer(map, &cfi->chips[chipnum],
ofs, &vecs, &vec_seek, size);
if (ret)
return ret;
ofs += size;
(*retlen) += size;
len -= size;
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
/* Be nice and reschedule with the chip in a usable state for other
processes. */
cond_resched();
} while (len);
return 0;
}
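/*
* Worked example (editorial, assuming wbufsize == 32): a 40-byte write
* starting at offset 0x1c is issued as three do_write_buffer() calls so
* that no chunk crosses a write-buffer boundary: 4 bytes at 0x1c,
* 32 bytes at 0x20, and 4 bytes at 0x40.
*/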
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
size_t len, size_t *retlen, const u_char *buf)
{
struct kvec vec;
vec.iov_base = (void *) buf;
vec.iov_len = len;
return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status;
int retries = 3;
int ret;
adr += chip->start;
retry:
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
XIP_INVAL_CACHED_RANGE(map, adr, len);
ENABLE_VPP(map);
xip_disable(map, chip, adr);
/* Clear the status register first */
map_write(map, CMD(0x50), adr);
/* Now erase */
map_write(map, CMD(0x20), adr);
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, len,
chip->erase_time,
chip->erase_time_max);
if (ret) {
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
xip_enable(map, chip, adr);
printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
goto out;
}
/* We've broken this before. It doesn't hurt to be safe */
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
status = map_read(map, adr);
/* check for errors */
if (map_word_bitsset(map, status, CMD(0x3a))) {
unsigned long chipstatus = MERGESTATUS(status);
/* Reset the error bits */
map_write(map, CMD(0x50), adr);
map_write(map, CMD(0x70), adr);
xip_enable(map, chip, adr);
if ((chipstatus & 0x30) == 0x30) {
printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
ret = -EINVAL;
} else if (chipstatus & 0x02) {
/* Protection bit set */
ret = -EROFS;
} else if (chipstatus & 0x8) {
/* Voltage */
printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
ret = -EIO;
} else if (chipstatus & 0x20 && retries--) {
printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
goto retry;
} else {
printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
ret = -EIO;
}
goto out;
}
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
unsigned long ofs, len;
int ret;
ofs = instr->addr;
len = instr->len;
ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
if (ret)
return ret;
instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
return 0;
}
static void cfi_intelext_sync (struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i;
struct flchip *chip;
int ret = 0;
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SYNCING);
if (!ret) {
chip->oldstate = chip->state;
chip->state = FL_SYNCING;
/* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
}
mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
mutex_unlock(&chip->mutex);
}
}
static int __xipram do_getlockstatus_oneblock(struct map_info *map,
struct flchip *chip,
unsigned long adr,
int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
int status, ofs_factor = cfi->interleave * cfi->device_type;
adr += chip->start;
xip_disable(map, chip, adr+(2*ofs_factor));
map_write(map, CMD(0x90), adr+(2*ofs_factor));
chip->state = FL_JEDEC_QUERY;
status = cfi_read_query(map, adr+(2*ofs_factor));
xip_enable(map, chip, 0);
return status;
}
#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
struct flchip *chip,
unsigned long adr,
int len, void *thunk)
{
printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
return 0;
}
#endif
#define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
int udelay;
int ret;
adr += chip->start;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
ENABLE_VPP(map);
xip_disable(map, chip, adr);
map_write(map, CMD(0x60), adr);
if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
map_write(map, CMD(0x01), adr);
chip->state = FL_LOCKING;
} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
map_write(map, CMD(0xD0), adr);
chip->state = FL_UNLOCKING;
} else
BUG();
/*
* If Instant Individual Block Locking supported then no need
* to delay.
*/
udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
if (ret) {
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
xip_enable(map, chip, adr);
printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
goto out;
}
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret;
#ifdef DEBUG_LOCK_BITS
printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
__func__, ofs, len);
cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
ofs, len, NULL);
#endif
ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
#ifdef DEBUG_LOCK_BITS
printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
__func__, ret);
cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
ofs, len, NULL);
#endif
return ret;
}
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
int ret;
#ifdef DEBUG_LOCK_BITS
printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
__func__, ofs, len);
cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
ofs, len, NULL);
#endif
ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
#ifdef DEBUG_LOCK_BITS
printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
__func__, ret);
cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
ofs, len, NULL);
#endif
return ret;
}
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
uint64_t len)
{
return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
ofs, len, NULL) ? 1 : 0;
}
#ifdef CONFIG_MTD_OTP
typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
u_long data_offset, u_char *buf, u_int size,
u_long prot_offset, u_int groupno, u_int groupsize);
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
struct cfi_private *cfi = map->fldrv_priv;
int ret;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
/* let's ensure we're not reading back cached data from array mode */
INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
xip_disable(map, chip, chip->start);
if (chip->state != FL_JEDEC_QUERY) {
map_write(map, CMD(0x90), chip->start);
chip->state = FL_JEDEC_QUERY;
}
map_copy_from(map, buf, chip->start + offset, size);
xip_enable(map, chip, chip->start);
/* then ensure we don't keep OTP data in the cache */
INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
put_chip(map, chip, chip->start);
mutex_unlock(&chip->mutex);
return 0;
}
static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
int ret;
while (size) {
unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
int gap = offset - bus_ofs;
int n = min_t(int, size, map_bankwidth(map)-gap);
map_word datum = map_word_ff(map);
datum = map_word_load_partial(map, datum, buf, gap, n);
ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
if (ret)
return ret;
offset += n;
buf += n;
size -= n;
}
return 0;
}
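/*
* Worked example (editorial, assuming map_bankwidth(map) == 4): writing
* 6 bytes of OTP data at offset 3 is split into three FL_OTP_WRITE word
* writes: one byte at bus offset 0 (gap 3), four bytes at offset 4, and
* the last byte at offset 8; unused byte lanes are left at 0xff.
*/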
static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word datum;
/* make sure area matches group boundaries */
if (size != grpsz)
return -EXDEV;
datum = map_word_ff(map);
datum = map_word_clr(map, datum, CMD(1 << grpno));
return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf,
otp_op_t action, int user_regs)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
struct flchip *chip;
struct cfi_intelext_otpinfo *otp;
u_long devsize, reg_prot_offset, data_offset;
u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
int ret;
*retlen = 0;
/* Check that we actually have some OTP registers */
if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
return -ENODATA;
/* we need real chips here not virtual ones */
devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
chip_step = devsize >> cfi->chipshift;
chip_num = 0;
/* Some chips have OTP located in the _top_ partition only.
For example: Intel 28F256L18T (T means top-parameter device) */
if (cfi->mfr == CFI_MFR_INTEL) {
switch (cfi->id) {
case 0x880b:
case 0x880c:
case 0x880d:
chip_num = chip_step - 1;
}
}
for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
chip = &cfi->chips[chip_num];
otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
/* first OTP region */
field = 0;
reg_prot_offset = extp->ProtRegAddr;
reg_fact_groups = 1;
reg_fact_size = 1 << extp->FactProtRegSize;
reg_user_groups = 1;
reg_user_size = 1 << extp->UserProtRegSize;
while (len > 0) {
/* flash geometry fixup */
data_offset = reg_prot_offset + 1;
data_offset *= cfi->interleave * cfi->device_type;
reg_prot_offset *= cfi->interleave * cfi->device_type;
reg_fact_size *= cfi->interleave;
reg_user_size *= cfi->interleave;
if (user_regs) {
groups = reg_user_groups;
groupsize = reg_user_size;
/* skip over factory reg area */
groupno = reg_fact_groups;
data_offset += reg_fact_groups * reg_fact_size;
} else {
groups = reg_fact_groups;
groupsize = reg_fact_size;
groupno = 0;
}
while (len > 0 && groups > 0) {
if (!action) {
/*
* Special case: if action is NULL
* we fill buf with otp_info records.
*/
struct otp_info *otpinfo;
map_word lockword;
len -= sizeof(struct otp_info);
if (len <= 0)
return -ENOSPC;
ret = do_otp_read(map, chip,
reg_prot_offset,
(u_char *)&lockword,
map_bankwidth(map),
0, 0, 0);
if (ret)
return ret;
otpinfo = (struct otp_info *)buf;
otpinfo->start = from;
otpinfo->length = groupsize;
otpinfo->locked =
!map_word_bitsset(map, lockword,
CMD(1 << groupno));
from += groupsize;
buf += sizeof(*otpinfo);
*retlen += sizeof(*otpinfo);
} else if (from >= groupsize) {
from -= groupsize;
data_offset += groupsize;
} else {
int size = groupsize;
data_offset += from;
size -= from;
from = 0;
if (size > len)
size = len;
ret = action(map, chip, data_offset,
buf, size, reg_prot_offset,
groupno, groupsize);
if (ret < 0)
return ret;
buf += size;
len -= size;
*retlen += size;
data_offset += size;
}
groupno++;
groups--;
}
/* next OTP region */
if (++field == extp->NumProtectionFields)
break;
reg_prot_offset = otp->ProtRegAddr;
reg_fact_groups = otp->FactGroups;
reg_fact_size = 1 << otp->FactProtRegSize;
reg_user_groups = otp->UserGroups;
reg_user_size = 1 << otp->UserProtRegSize;
otp++;
}
}
return 0;
}
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen,
u_char *buf)
{
return cfi_intelext_otp_walk(mtd, from, len, retlen,
buf, do_otp_read, 0);
}
static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen,
u_char *buf)
{
return cfi_intelext_otp_walk(mtd, from, len, retlen,
buf, do_otp_read, 1);
}
static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen,
u_char *buf)
{
return cfi_intelext_otp_walk(mtd, from, len, retlen,
buf, do_otp_write, 1);
}
static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
loff_t from, size_t len)
{
size_t retlen;
return cfi_intelext_otp_walk(mtd, from, len, &retlen,
NULL, do_otp_lock, 1);
}
static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
struct otp_info *buf, size_t len)
{
size_t retlen;
int ret;
ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
return ret ? : retlen;
}
static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
struct otp_info *buf, size_t len)
{
size_t retlen;
int ret;
ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
return ret ? : retlen;
}
#endif
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
struct mtd_erase_region_info *region;
int block, status, i;
unsigned long adr;
size_t len;
for (i = 0; i < mtd->numeraseregions; i++) {
region = &mtd->eraseregions[i];
if (!region->lockmap)
continue;
for (block = 0; block < region->numblocks; block++){
len = region->erasesize;
adr = region->offset + block * len;
status = cfi_varsize_frob(mtd,
do_getlockstatus_oneblock, adr, len, NULL);
if (status)
set_bit(block, region->lockmap);
else
clear_bit(block, region->lockmap);
}
}
}
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
int i;
struct flchip *chip;
int ret = 0;
if ((mtd->flags & MTD_POWERUP_LOCK)
&& extp && (extp->FeatureSupport & (1 << 5)))
cfi_intelext_save_locks(mtd);
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
switch (chip->state) {
case FL_READY:
case FL_STATUS:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
if (chip->oldstate == FL_READY) {
/* place the chip in a known state before suspend */
map_write(map, CMD(0xFF), cfi->chips[i].start);
chip->oldstate = chip->state;
chip->state = FL_PM_SUSPENDED;
/* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
} else {
/* There seems to be an operation pending. We must wait for it. */
printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
ret = -EAGAIN;
}
break;
default:
/* Should we actually wait? Once upon a time these routines weren't
allowed to. Or should we return -EAGAIN, because the upper layers
ought to have already shut down anything which was using the device
anyway? The latter for now. */
printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
ret = -EAGAIN;
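/* fall through */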
case FL_PM_SUSPENDED:
break;
}
mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
if (ret) {
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
because we're returning failure, and it didn't
get power cycled */
chip->state = chip->oldstate;
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
mutex_unlock(&chip->mutex);
}
}
return ret;
}
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
struct mtd_erase_region_info *region;
int block, i;
unsigned long adr;
size_t len;
for (i = 0; i < mtd->numeraseregions; i++) {
region = &mtd->eraseregions[i];
if (!region->lockmap)
continue;
for (block = 0; block < region->numblocks; block++) {
len = region->erasesize;
adr = region->offset + block * len;
if (!test_bit(block, region->lockmap))
cfi_intelext_unlock(mtd, adr, len);
}
}
}
static void cfi_intelext_resume(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp = cfi->cmdset_priv;
int i;
struct flchip *chip;
for (i=0; i<cfi->numchips; i++) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
map_write(map, CMD(0xFF), cfi->chips[i].start);
chip->oldstate = chip->state = FL_READY;
wake_up(&chip->wq);
}
mutex_unlock(&chip->mutex);
}
if ((mtd->flags & MTD_POWERUP_LOCK)
&& extp && (extp->FeatureSupport & (1 << 5)))
cfi_intelext_restore_locks(mtd);
}
static int cfi_intelext_reset(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i, ret;
for (i=0; i < cfi->numchips; i++) {
struct flchip *chip = &cfi->chips[i];
/* force the completion of any ongoing operation
and switch to array mode so any bootloader in
flash is accessible for soft reboot. */
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
if (!ret) {
map_write(map, CMD(0xff), chip->start);
chip->state = FL_SHUTDOWN;
put_chip(map, chip, chip->start);
}
mutex_unlock(&chip->mutex);
}
return 0;
}
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
void *v)
{
struct mtd_info *mtd;
mtd = container_of(nb, struct mtd_info, reboot_notifier);
cfi_intelext_reset(mtd);
return NOTIFY_DONE;
}
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct mtd_erase_region_info *region;
int i;
cfi_intelext_reset(mtd);
unregister_reboot_notifier(&mtd->reboot_notifier);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
kfree(cfi->chips[0].priv);
kfree(cfi);
for (i = 0; i < mtd->numeraseregions; i++) {
region = &mtd->eraseregions[i];
if (region->lockmap)
kfree(region->lockmap);
}
kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");
| gpl-2.0 |
Blechd0se/kernel-moto-g | drivers/net/wireless/rtlwifi/rtl8192de/dm.c | 4818 | 47624 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "../wifi.h"
#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "dm.h"
#include "fw.h"
#define UNDEC_SM_PWDB entry_min_undecoratedsmoothed_pwdb
struct dig_t de_digtable;
static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
0x7f8001fe, /* 0, +6.0dB */
0x788001e2, /* 1, +5.5dB */
0x71c001c7, /* 2, +5.0dB */
0x6b8001ae, /* 3, +4.5dB */
0x65400195, /* 4, +4.0dB */
0x5fc0017f, /* 5, +3.5dB */
0x5a400169, /* 6, +3.0dB */
0x55400155, /* 7, +2.5dB */
0x50800142, /* 8, +2.0dB */
0x4c000130, /* 9, +1.5dB */
0x47c0011f, /* 10, +1.0dB */
0x43c0010f, /* 11, +0.5dB */
0x40000100, /* 12, +0dB */
0x3c8000f2, /* 13, -0.5dB */
0x390000e4, /* 14, -1.0dB */
0x35c000d7, /* 15, -1.5dB */
0x32c000cb, /* 16, -2.0dB */
0x300000c0, /* 17, -2.5dB */
0x2d4000b5, /* 18, -3.0dB */
0x2ac000ab, /* 19, -3.5dB */
0x288000a2, /* 20, -4.0dB */
0x26000098, /* 21, -4.5dB */
0x24000090, /* 22, -5.0dB */
0x22000088, /* 23, -5.5dB */
0x20000080, /* 24, -6.0dB */
0x1e400079, /* 25, -6.5dB */
0x1c800072, /* 26, -7.0dB */
0x1b00006c, /* 27, -7.5dB */
0x19800066, /* 28, -8.0dB */
0x18000060, /* 29, -8.5dB */
0x16c0005b, /* 30, -9.0dB */
0x15800056, /* 31, -9.5dB */
0x14400051, /* 32, -10.0dB */
0x1300004c, /* 33, -10.5dB */
0x12000048, /* 34, -11.0dB */
0x11000044, /* 35, -11.5dB */
0x10000040, /* 36, -12.0dB */
0x0f00003c, /* 37, -12.5dB */
0x0e400039, /* 38, -13.0dB */
0x0d800036, /* 39, -13.5dB */
0x0cc00033, /* 40, -14.0dB */
0x0c000030, /* 41, -14.5dB */
0x0b40002d, /* 42, -15.0dB */
};
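/*
* Editorial note: each entry above appears to encode the same gain value
* twice, as (v << 22) | v, where v is roughly 0x100 * 10^(dB/20); e.g. the
* 0 dB entry 0x40000100 uses v = 0x100 and the +6 dB entry 0x7f8001fe uses
* v = 0x1fe. This is an observation from the table values themselves, not
* a statement from vendor documentation.
*/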
static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
{0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
{0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
{0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
{0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
{0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
{0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
{0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
{0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
{0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
{0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
{0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
{0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
{0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
{0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
{0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
{0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
{0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
{0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
{0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
{0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
{0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
{0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
{0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
{0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
{0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
{0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
{0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
{0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
{0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
};
static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
{0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
{0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
{0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
{0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
{0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
{0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
{0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
{0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
{0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
{0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
{0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
{0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
{0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
{0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
{0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
{0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
{0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
{0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
{0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
{0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
{0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
{0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
{0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
{0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
{0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
{0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
{0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
};
static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
{
de_digtable.dig_enable_flag = true;
de_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
de_digtable.cur_igvalue = 0x20;
de_digtable.pre_igvalue = 0x0;
de_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
de_digtable.presta_connectstate = DIG_STA_DISCONNECT;
de_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
de_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
de_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
de_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
de_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
de_digtable.rx_gain_range_max = DM_DIG_FA_UPPER;
de_digtable.rx_gain_range_min = DM_DIG_FA_LOWER;
de_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
de_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
de_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
de_digtable.pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
de_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
de_digtable.large_fa_hit = 0;
de_digtable.recover_cnt = 0;
de_digtable.forbidden_igi = DM_DIG_FA_LOWER;
}
static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
u32 ret_value;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
unsigned long flag = 0;
/* hold ofdm counter */
rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */
rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /*hold page D counter */
ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, BMASKDWORD);
falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, BMASKDWORD);
falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, BMASKDWORD);
falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, BMASKDWORD);
falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
falsealm_cnt->cnt_rate_illegal +
falsealm_cnt->cnt_crc8_fail +
falsealm_cnt->cnt_mcs_fail +
falsealm_cnt->cnt_fast_fsync_fail +
falsealm_cnt->cnt_sb_search_fail;
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
/* hold cck counter */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, BMASKBYTE0);
falsealm_cnt->cnt_cck_fail = ret_value;
ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, BMASKBYTE3);
falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
} else {
falsealm_cnt->cnt_cck_fail = 0;
}
/* reset false alarm counter registers */
falsealm_cnt->cnt_all = falsealm_cnt->cnt_fast_fsync_fail +
falsealm_cnt->cnt_sb_search_fail +
falsealm_cnt->cnt_parity_fail +
falsealm_cnt->cnt_rate_illegal +
falsealm_cnt->cnt_crc8_fail +
falsealm_cnt->cnt_mcs_fail +
falsealm_cnt->cnt_cck_fail;
rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
/* update ofdm counter */
rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
/* update page C counter */
rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0);
/* update page D counter */
rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0);
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
/* reset cck counter */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
/* enable cck counter */
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n",
falsealm_cnt->cnt_fast_fsync_fail,
falsealm_cnt->cnt_sb_search_fail);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n",
falsealm_cnt->cnt_parity_fail,
falsealm_cnt->cnt_rate_illegal,
falsealm_cnt->cnt_crc8_fail,
falsealm_cnt->cnt_mcs_fail);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n",
falsealm_cnt->cnt_ofdm_fail,
falsealm_cnt->cnt_cck_fail,
falsealm_cnt->cnt_all);
}
static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtlpriv);
/* Determine the minimum RSSI */
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
de_digtable.min_undecorated_pwdb_for_dm = 0;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"Not connected to any\n");
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
de_digtable.min_undecorated_pwdb_for_dm =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"AP Client PWDB = 0x%lx\n",
rtlpriv->dm.UNDEC_SM_PWDB);
} else {
de_digtable.min_undecorated_pwdb_for_dm =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"STA Default Port PWDB = 0x%x\n",
de_digtable.min_undecorated_pwdb_for_dm);
}
} else {
de_digtable.min_undecorated_pwdb_for_dm =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"AP Ext Port or disconnect PWDB = 0x%x\n",
de_digtable.min_undecorated_pwdb_for_dm);
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
de_digtable.min_undecorated_pwdb_for_dm);
}
static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
unsigned long flag = 0;
if (de_digtable.cursta_connectctate == DIG_STA_CONNECT) {
if (de_digtable.pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
if (de_digtable.min_undecorated_pwdb_for_dm <= 25)
de_digtable.cur_cck_pd_state =
CCK_PD_STAGE_LOWRSSI;
else
de_digtable.cur_cck_pd_state =
CCK_PD_STAGE_HIGHRSSI;
} else {
if (de_digtable.min_undecorated_pwdb_for_dm <= 20)
de_digtable.cur_cck_pd_state =
CCK_PD_STAGE_LOWRSSI;
else
de_digtable.cur_cck_pd_state =
CCK_PD_STAGE_HIGHRSSI;
}
} else {
de_digtable.cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
}
if (de_digtable.pre_cck_pd_state != de_digtable.cur_cck_pd_state) {
if (de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
} else {
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
de_digtable.pre_cck_pd_state = de_digtable.cur_cck_pd_state;
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
de_digtable.cursta_connectctate == DIG_STA_CONNECT ?
"DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
"Low RSSI " : "High RSSI ");
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
}
void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
de_digtable.cur_igvalue, de_digtable.pre_igvalue,
de_digtable.backoff_val);
if (de_digtable.dig_enable_flag == false) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
de_digtable.pre_igvalue = 0x17;
return;
}
if (de_digtable.pre_igvalue != de_digtable.cur_igvalue) {
rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
de_digtable.cur_igvalue);
rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
de_digtable.cur_igvalue);
de_digtable.pre_igvalue = de_digtable.cur_igvalue;
}
}
static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
{
if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
(rtlpriv->mac80211.vendor == PEER_CISCO)) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
if (de_digtable.last_min_undecorated_pwdb_for_dm >= 50
&& de_digtable.min_undecorated_pwdb_for_dm < 50) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Early Mode Off\n");
} else if (de_digtable.last_min_undecorated_pwdb_for_dm <= 55 &&
de_digtable.min_undecorated_pwdb_for_dm > 55) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Early Mode On\n");
}
} else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n");
}
}
static void rtl92d_dm_dig(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 value_igi = de_digtable.cur_igvalue;
struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
if (rtlpriv->rtlhal.earlymode_enable) {
rtl92d_early_mode_enabled(rtlpriv);
de_digtable.last_min_undecorated_pwdb_for_dm =
de_digtable.min_undecorated_pwdb_for_dm;
}
if (!rtlpriv->dm.dm_initialgain_enable)
return;
/* Because we send data packets while scanning, returning here
* would lower throughput on some APs (e.g. gear-3700 with WEP);
* this is a difference between the mac80211 and ieee80211 drivers. */
/* if (rtlpriv->mac80211.act_scanning)
* return; */
/* Not STA mode: return for now */
if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
return;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
/* Decide the current status and if modify initial gain or not */
if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
de_digtable.cursta_connectctate = DIG_STA_CONNECT;
else
de_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
/* adjust initial gain according to false alarm counter */
if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
value_igi--;
else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH1)
value_igi += 0;
else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH2)
value_igi++;
else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2)
value_igi += 2;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
de_digtable.large_fa_hit, de_digtable.forbidden_igi);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG() Before: Recover_cnt=%d, rx_gain_range_min=%x\n",
de_digtable.recover_cnt, de_digtable.rx_gain_range_min);
/* deal with abnormally large false alarm */
if (falsealm_cnt->cnt_all > 10000) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG(): Abnormally false alarm case\n");
de_digtable.large_fa_hit++;
if (de_digtable.forbidden_igi < de_digtable.cur_igvalue) {
de_digtable.forbidden_igi = de_digtable.cur_igvalue;
de_digtable.large_fa_hit = 1;
}
if (de_digtable.large_fa_hit >= 3) {
if ((de_digtable.forbidden_igi + 1) > DM_DIG_MAX)
de_digtable.rx_gain_range_min = DM_DIG_MAX;
else
de_digtable.rx_gain_range_min =
(de_digtable.forbidden_igi + 1);
de_digtable.recover_cnt = 3600; /* 3600=2hr */
}
} else {
/* Recovery mechanism for IGI lower bound */
if (de_digtable.recover_cnt != 0) {
de_digtable.recover_cnt--;
} else {
if (de_digtable.large_fa_hit == 0) {
if ((de_digtable.forbidden_igi - 1) <
DM_DIG_FA_LOWER) {
de_digtable.forbidden_igi =
DM_DIG_FA_LOWER;
de_digtable.rx_gain_range_min =
DM_DIG_FA_LOWER;
} else {
de_digtable.forbidden_igi--;
de_digtable.rx_gain_range_min =
(de_digtable.forbidden_igi + 1);
}
} else if (de_digtable.large_fa_hit == 3) {
de_digtable.large_fa_hit = 0;
}
}
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
de_digtable.large_fa_hit, de_digtable.forbidden_igi);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG() After: recover_cnt=%d, rx_gain_range_min=%x\n",
de_digtable.recover_cnt, de_digtable.rx_gain_range_min);
if (value_igi > DM_DIG_MAX)
value_igi = DM_DIG_MAX;
else if (value_igi < de_digtable.rx_gain_range_min)
value_igi = de_digtable.rx_gain_range_min;
de_digtable.cur_igvalue = value_igi;
rtl92d_dm_write_dig(hw);
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
rtl92d_dm_cck_packet_detection_thresh(hw);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n");
}
static void rtl92d_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.dynamic_txpower_enable = true;
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
}
static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
long undecorated_smoothed_pwdb;
if ((!rtlpriv->dm.dynamic_txpower_enable)
|| rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
}
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
"Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undecorated_smoothed_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"IBSS Client PWDB = 0x%lx\n",
undecorated_smoothed_pwdb);
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"STA Default Port PWDB = 0x%lx\n",
undecorated_smoothed_pwdb);
}
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Ext Port PWDB = 0x%lx\n",
undecorated_smoothed_pwdb);
}
if (rtlhal->current_bandtype == BAND_ON_5G) {
if (undecorated_smoothed_pwdb >= 0x33) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
"5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n");
} else if ((undecorated_smoothed_pwdb < 0x33)
&& (undecorated_smoothed_pwdb >= 0x2b)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
"5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n");
} else if (undecorated_smoothed_pwdb < 0x2b) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
"5G:TxHighPwrLevel_Normal\n");
}
} else {
if (undecorated_smoothed_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else
if ((undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3))
&& (undecorated_smoothed_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_NORMAL\n");
}
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
}
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
static void rtl92d_dm_pwdb_monitor(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* AP & ADHOC & MESH will return tmp */
if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
return;
/* Indicate Rx signal strength to FW. */
if (rtlpriv->dm.useramask) {
u32 temp = rtlpriv->dm.undecorated_smoothed_pwdb;
temp <<= 16;
temp |= 0x100;
/* fw v12 cmdid 5: use max macid; for NIC,
* default macid is 0, max macid is 1 */
rtl92d_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, (u8 *) (&temp));
} else {
rtl_write_byte(rtlpriv, 0x4fe,
(u8) rtlpriv->dm.undecorated_smoothed_pwdb);
}
}
void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.current_turbo_edca = false;
rtlpriv->dm.is_any_nonbepkts = false;
rtlpriv->dm.is_cur_rdlstate = false;
}
static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
static u64 last_txok_cnt;
static u64 last_rxok_cnt;
u64 cur_txok_cnt;
u64 cur_rxok_cnt;
u32 edca_be_ul = 0x5ea42b;
u32 edca_be_dl = 0x5ea42b;
if (mac->link_state != MAC80211_LINKED) {
rtlpriv->dm.current_turbo_edca = false;
goto exit;
}
/* Enable BEQ TxOP limit configuration in wireless G-mode. */
/* Check whether we should force the TXOP configuration on. */
if ((!rtlpriv->dm.disable_framebursting) &&
(rtlpriv->sec.pairwise_enc_algorithm == WEP40_ENCRYPTION ||
rtlpriv->sec.pairwise_enc_algorithm == WEP104_ENCRYPTION ||
rtlpriv->sec.pairwise_enc_algorithm == TKIP_ENCRYPTION)) {
/* Force TxOP limit to 0x005e for UL. */
if (!(edca_be_ul & 0xffff0000))
edca_be_ul |= 0x005e0000;
/* Force TxOP limit to 0x005e for DL. */
if (!(edca_be_dl & 0xffff0000))
edca_be_dl |= 0x005e0000;
}
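/* Per the comments above, the TXOP limit lives in the upper 16 bits
* of REG_EDCA_BE_PARAM, so 0x005e is only patched in when no limit
* has been set yet. */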
if ((!rtlpriv->dm.is_any_nonbepkts) &&
(!rtlpriv->dm.disable_framebursting)) {
cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
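/* Heuristic: if unicast RX bytes exceed 4x TX bytes since the last
* pass, treat traffic as download-dominated and program the DL EDCA
* parameters; otherwise program the UL parameters. */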
if (cur_rxok_cnt > 4 * cur_txok_cnt) {
if (!rtlpriv->dm.is_cur_rdlstate ||
!rtlpriv->dm.current_turbo_edca) {
rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
edca_be_dl);
rtlpriv->dm.is_cur_rdlstate = true;
}
} else {
if (rtlpriv->dm.is_cur_rdlstate ||
!rtlpriv->dm.current_turbo_edca) {
rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
edca_be_ul);
rtlpriv->dm.is_cur_rdlstate = false;
}
}
rtlpriv->dm.current_turbo_edca = true;
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
(u8 *) (&tmp));
rtlpriv->dm.current_turbo_edca = false;
}
}
exit:
rtlpriv->dm.is_any_nonbepkts = false;
last_txok_cnt = rtlpriv->stats.txbytesunicast;
last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
}
static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 index_mapping[RX_INDEX_MAPPING_NUM] = {
0x0f, 0x0f, 0x0d, 0x0c, 0x0b,
0x0a, 0x09, 0x08, 0x07, 0x06,
0x05, 0x04, 0x04, 0x03, 0x02
};
int i;
u32 u4tmp;
u4tmp = (index_mapping[(rtlpriv->efuse.eeprom_thermalmeter -
rtlpriv->dm.thermalvalue_rxgain)]) << 12;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"===> Rx Gain %x\n", u4tmp);
for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
rtl_set_rfreg(hw, i, 0x3C, BRFREGOFFSETMASK,
(rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp);
}
static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
u8 *cck_index_old)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int i;
unsigned long flag = 0;
long temp_cck;
/* Query CCK default setting From 0xa24 */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2,
BMASKDWORD) & BMASKCCK;
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
for (i = 0; i < CCK_TABLE_LENGTH; i++) {
if (rtlpriv->dm.cck_inch14) {
if (!memcmp((void *)&temp_cck,
(void *)&cckswing_table_ch14[i][2], 4)) {
*cck_index_old = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Initial reg0x%x = 0x%lx, cck_index=0x%x, ch 14 %d\n",
RCCK0_TXFILTER2, temp_cck,
*cck_index_old,
rtlpriv->dm.cck_inch14);
break;
}
} else {
if (!memcmp((void *) &temp_cck,
&cckswing_table_ch1ch13[i][2], 4)) {
*cck_index_old = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
RCCK0_TXFILTER2, temp_cck,
*cck_index_old,
rtlpriv->dm.cck_inch14);
break;
}
}
}
*temp_cckg = temp_cck;
}
static void rtl92d_bandtype_5G(struct rtl_hal *rtlhal, u8 *ofdm_index,
bool *internal_pa, u8 thermalvalue, u8 delta,
u8 rf, struct rtl_efuse *rtlefuse,
struct rtl_priv *rtlpriv, struct rtl_phy *rtlphy,
u8 index_mapping[5][INDEX_MAPPING_NUM],
u8 index_mapping_pa[8][INDEX_MAPPING_NUM])
{
int i;
u8 index;
u8 offset = 0;
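/* 'offset' picks a row of the index-mapping tables passed in by the
* caller: internal vs external PA, MAC/path, the low vs high 5G
* channel group for the internal-PA case, and (via the offset++
* below) whether the temperature rose above the EEPROM reference. */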
for (i = 0; i < rf; i++) {
if (rtlhal->macphymode == DUALMAC_DUALPHY &&
rtlhal->interfaceindex == 1) /* MAC 1 5G */
*internal_pa = rtlefuse->internal_pa_5g[1];
else
*internal_pa = rtlefuse->internal_pa_5g[i];
if (*internal_pa) {
if (rtlhal->interfaceindex == 1 || i == rf)
offset = 4;
else
offset = 0;
if (rtlphy->current_channel >= 100 &&
rtlphy->current_channel <= 165)
offset += 2;
} else {
if (rtlhal->interfaceindex == 1 || i == rf)
offset = 2;
else
offset = 0;
}
if (thermalvalue > rtlefuse->eeprom_thermalmeter)
offset++;
if (*internal_pa) {
if (delta > INDEX_MAPPING_NUM - 1)
index = index_mapping_pa[offset]
[INDEX_MAPPING_NUM - 1];
else
index =
index_mapping_pa[offset][delta];
} else {
if (delta > INDEX_MAPPING_NUM - 1)
index =
index_mapping[offset][INDEX_MAPPING_NUM - 1];
else
index = index_mapping[offset][delta];
}
if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
if (*internal_pa && thermalvalue > 0x12) {
ofdm_index[i] = rtlpriv->dm.ofdm_index[i] -
((delta / 2) * 3 + (delta % 2));
} else {
ofdm_index[i] -= index;
}
} else {
ofdm_index[i] += index;
}
}
}
static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 thermalvalue, delta, delta_lck, delta_iqk, delta_rxgain;
u8 offset, thermalvalue_avg_count = 0;
u32 thermalvalue_avg = 0;
bool internal_pa = false;
long ele_a = 0, ele_d, temp_cck, val_x, value32;
long val_y, ele_c = 0;
u8 ofdm_index[2];
u8 cck_index = 0;
u8 ofdm_index_old[2];
u8 cck_index_old = 0;
u8 index;
int i;
bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
u8 ofdm_min_index = 6, ofdm_min_index_internal_pa = 3, rf;
u8 indexforchannel =
rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel);
u8 index_mapping[5][INDEX_MAPPING_NUM] = {
/* 5G, path A/MAC 0, decrease power */
{0, 1, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
/* 5G, path A/MAC 0, increase power */
{0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
/* 5G, path B/MAC 1, decrease power */
{0, 2, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
/* 5G, path B/MAC 1, increase power */
{0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
/* 2.4G, decrease power */
{0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10},
};
u8 index_mapping_internal_pa[8][INDEX_MAPPING_NUM] = {
/* 5G, path A/MAC 0, ch36-64, decrease power */
{0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
/* 5G, path A/MAC 0, ch36-64, increase power */
{0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
/* 5G, path A/MAC 0, ch100-165, decrease power */
{0, 1, 2, 3, 5, 6, 8, 10, 11, 13, 14, 15, 15},
/* 5G, path A/MAC 0, ch100-165, increase power */
{0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
/* 5G, path B/MAC 1, ch36-64, decrease power */
{0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
/* 5G, path B/MAC 1, ch36-64, increase power */
{0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
/* 5G, path B/MAC 1, ch100-165, decrease power */
{0, 1, 2, 3, 5, 6, 8, 9, 10, 12, 13, 14, 14},
/* 5G, path B/MAC 1, ch100-165, increase power */
{0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
};
rtlpriv->dm.txpower_trackinginit = true;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n");
thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
thermalvalue,
rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
rtl92d_phy_ap_calibrate(hw, (thermalvalue -
rtlefuse->eeprom_thermalmeter));
if (is2t)
rf = 2;
else
rf = 1;
if (thermalvalue) {
ele_d = rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
BMASKDWORD) & BMASKOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
if (ele_d == (ofdmswing_table[i] & BMASKOFDM_D)) {
ofdm_index_old[0] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
ROFDM0_XATxIQIMBALANCE,
ele_d, ofdm_index_old[0]);
break;
}
}
if (is2t) {
ele_d = rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
BMASKDWORD) & BMASKOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
if (ele_d ==
(ofdmswing_table[i] & BMASKOFDM_D)) {
ofdm_index_old[1] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
DBG_LOUD,
"Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
ROFDM0_XBTxIQIMBALANCE, ele_d,
ofdm_index_old[1]);
break;
}
}
}
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
rtl92d_bandtype_2_4G(hw, &temp_cck, &cck_index_old);
} else {
temp_cck = 0x090e1317;
cck_index_old = 12;
}
if (!rtlpriv->dm.thermalvalue) {
rtlpriv->dm.thermalvalue =
rtlefuse->eeprom_thermalmeter;
rtlpriv->dm.thermalvalue_lck = thermalvalue;
rtlpriv->dm.thermalvalue_iqk = thermalvalue;
rtlpriv->dm.thermalvalue_rxgain =
rtlefuse->eeprom_thermalmeter;
for (i = 0; i < rf; i++)
rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
rtlpriv->dm.cck_index = cck_index_old;
}
if (rtlhal->reloadtxpowerindex) {
for (i = 0; i < rf; i++)
rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
rtlpriv->dm.cck_index = cck_index_old;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"reload ofdm index for band switch\n");
}
rtlpriv->dm.thermalvalue_avg
[rtlpriv->dm.thermalvalue_avg_index] = thermalvalue;
rtlpriv->dm.thermalvalue_avg_index++;
if (rtlpriv->dm.thermalvalue_avg_index == AVG_THERMAL_NUM)
rtlpriv->dm.thermalvalue_avg_index = 0;
for (i = 0; i < AVG_THERMAL_NUM; i++) {
if (rtlpriv->dm.thermalvalue_avg[i]) {
thermalvalue_avg +=
rtlpriv->dm.thermalvalue_avg[i];
thermalvalue_avg_count++;
}
}
if (thermalvalue_avg_count)
thermalvalue = (u8) (thermalvalue_avg /
thermalvalue_avg_count);
if (rtlhal->reloadtxpowerindex) {
delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
(thermalvalue - rtlefuse->eeprom_thermalmeter) :
(rtlefuse->eeprom_thermalmeter - thermalvalue);
rtlhal->reloadtxpowerindex = false;
rtlpriv->dm.done_txpower = false;
} else if (rtlpriv->dm.done_txpower) {
delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
(thermalvalue - rtlpriv->dm.thermalvalue) :
(rtlpriv->dm.thermalvalue - thermalvalue);
} else {
delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
(thermalvalue - rtlefuse->eeprom_thermalmeter) :
(rtlefuse->eeprom_thermalmeter - thermalvalue);
}
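/* 'delta' is the absolute thermal drift from the chosen reference:
* the EEPROM value on a reload or first run, otherwise the last
* tracked value. delta_lck/delta_iqk/delta_rxgain computed below
* gate the LCK, IQK and RX-gain recalibrations respectively. */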
delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
(thermalvalue - rtlpriv->dm.thermalvalue_lck) :
(rtlpriv->dm.thermalvalue_lck - thermalvalue);
delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
(thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
(rtlpriv->dm.thermalvalue_iqk - thermalvalue);
delta_rxgain =
(thermalvalue > rtlpriv->dm.thermalvalue_rxgain) ?
(thermalvalue - rtlpriv->dm.thermalvalue_rxgain) :
(rtlpriv->dm.thermalvalue_rxgain - thermalvalue);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
thermalvalue, rtlpriv->dm.thermalvalue,
rtlefuse->eeprom_thermalmeter, delta, delta_lck,
delta_iqk);
if ((delta_lck > rtlefuse->delta_lck) &&
(rtlefuse->delta_lck != 0)) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
rtl92d_phy_lc_calibrate(hw);
}
if (delta > 0 && rtlpriv->dm.txpower_track_control) {
rtlpriv->dm.done_txpower = true;
delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
(thermalvalue - rtlefuse->eeprom_thermalmeter) :
(rtlefuse->eeprom_thermalmeter - thermalvalue);
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
offset = 4;
if (delta > INDEX_MAPPING_NUM - 1)
index = index_mapping[offset]
[INDEX_MAPPING_NUM - 1];
else
index = index_mapping[offset][delta];
if (thermalvalue > rtlpriv->dm.thermalvalue) {
for (i = 0; i < rf; i++)
ofdm_index[i] -= delta;
cck_index -= delta;
} else {
for (i = 0; i < rf; i++)
ofdm_index[i] += index;
cck_index += index;
}
} else if (rtlhal->current_bandtype == BAND_ON_5G) {
rtl92d_bandtype_5G(rtlhal, ofdm_index,
&internal_pa, thermalvalue,
delta, rf, rtlefuse, rtlpriv,
rtlphy, index_mapping,
index_mapping_internal_pa);
}
if (is2t) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x,cck_index=0x%x\n",
rtlpriv->dm.ofdm_index[0],
rtlpriv->dm.ofdm_index[1],
rtlpriv->dm.cck_index);
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"temp OFDM_A_index=0x%x,cck_index = 0x%x\n",
rtlpriv->dm.ofdm_index[0],
rtlpriv->dm.cck_index);
}
for (i = 0; i < rf; i++) {
if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1)
ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1;
else if (ofdm_index[i] < ofdm_min_index)
ofdm_index[i] = ofdm_min_index;
}
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
if (cck_index > CCK_TABLE_SIZE - 1) {
cck_index = CCK_TABLE_SIZE - 1;
} else if (internal_pa ||
rtlhal->current_bandtype ==
BAND_ON_2_4G) {
if (ofdm_index[i] <
ofdm_min_index_internal_pa)
ofdm_index[i] =
ofdm_min_index_internal_pa;
} else if (cck_index < 0) {
cck_index = 0;
}
}
if (is2t) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
ofdm_index[0], ofdm_index[1],
cck_index);
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"new OFDM_A_index=0x%x,cck_index = 0x%x\n",
ofdm_index[0], cck_index);
}
ele_d = (ofdmswing_table[(u8) ofdm_index[0]] &
0xFFC00000) >> 22;
val_x = rtlphy->iqk_matrix_regsetting
[indexforchannel].value[0][0];
val_y = rtlphy->iqk_matrix_regsetting
[indexforchannel].value[0][1];
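/* val_x/val_y are the 10-bit signed IQK results for this channel;
* the 0xFFFFFC00 OR below sign-extends them. Sketch of the applied
* compensation: ele_A = (X * ele_D) >> 8, ele_C = (Y * ele_D) >> 8,
* each kept to 10 bits, with element B fixed at 0. */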
if (val_x != 0) {
if ((val_x & 0x00000200) != 0)
val_x = val_x | 0xFFFFFC00;
ele_a =
((val_x * ele_d) >> 8) & 0x000003FF;
/* new element C = element D x Y */
if ((val_y & 0x00000200) != 0)
val_y = val_y | 0xFFFFFC00;
ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
/* write new elements A, C, D to regC80 and
* regC94, element B is always 0 */
value32 = (ele_d << 22) | ((ele_c & 0x3F) <<
16) | ele_a;
rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
BMASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
value32);
} else {
rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
BMASKDWORD,
ofdmswing_table
[(u8)ofdm_index[0]]);
rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(24), 0x00);
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
rtlhal->interfaceindex,
val_x, val_y, ele_a, ele_c, ele_d,
val_x, val_y);
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* Adjust CCK according to IQK result */
if (!rtlpriv->dm.cck_inch14) {
rtl_write_byte(rtlpriv, 0xa22,
cckswing_table_ch1ch13
[(u8)cck_index][0]);
rtl_write_byte(rtlpriv, 0xa23,
cckswing_table_ch1ch13
[(u8)cck_index][1]);
rtl_write_byte(rtlpriv, 0xa24,
cckswing_table_ch1ch13
[(u8)cck_index][2]);
rtl_write_byte(rtlpriv, 0xa25,
cckswing_table_ch1ch13
[(u8)cck_index][3]);
rtl_write_byte(rtlpriv, 0xa26,
cckswing_table_ch1ch13
[(u8)cck_index][4]);
rtl_write_byte(rtlpriv, 0xa27,
cckswing_table_ch1ch13
[(u8)cck_index][5]);
rtl_write_byte(rtlpriv, 0xa28,
cckswing_table_ch1ch13
[(u8)cck_index][6]);
rtl_write_byte(rtlpriv, 0xa29,
cckswing_table_ch1ch13
[(u8)cck_index][7]);
} else {
rtl_write_byte(rtlpriv, 0xa22,
cckswing_table_ch14
[(u8)cck_index][0]);
rtl_write_byte(rtlpriv, 0xa23,
cckswing_table_ch14
[(u8)cck_index][1]);
rtl_write_byte(rtlpriv, 0xa24,
cckswing_table_ch14
[(u8)cck_index][2]);
rtl_write_byte(rtlpriv, 0xa25,
cckswing_table_ch14
[(u8)cck_index][3]);
rtl_write_byte(rtlpriv, 0xa26,
cckswing_table_ch14
[(u8)cck_index][4]);
rtl_write_byte(rtlpriv, 0xa27,
cckswing_table_ch14
[(u8)cck_index][5]);
rtl_write_byte(rtlpriv, 0xa28,
cckswing_table_ch14
[(u8)cck_index][6]);
rtl_write_byte(rtlpriv, 0xa29,
cckswing_table_ch14
[(u8)cck_index][7]);
}
}
if (is2t) {
ele_d = (ofdmswing_table[(u8) ofdm_index[1]] &
0xFFC00000) >> 22;
val_x = rtlphy->iqk_matrix_regsetting
[indexforchannel].value[0][4];
val_y = rtlphy->iqk_matrix_regsetting
[indexforchannel].value[0][5];
if (val_x != 0) {
if ((val_x & 0x00000200) != 0)
/* consider minus */
val_x = val_x | 0xFFFFFC00;
ele_a = ((val_x * ele_d) >> 8) &
0x000003FF;
/* new element C = element D x Y */
if ((val_y & 0x00000200) != 0)
val_y =
val_y | 0xFFFFFC00;
ele_c =
((val_y *
ele_d) >> 8) & 0x00003FF;
/* write new elements A, C, D to regC88
* and regC9C, element B is always 0
*/
value32 = (ele_d << 22) |
((ele_c & 0x3F) << 16) |
ele_a;
rtl_set_bbreg(hw,
ROFDM0_XBTxIQIMBALANCE,
BMASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
BMASKH4BITS, value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), value32);
} else {
rtl_set_bbreg(hw,
ROFDM0_XBTxIQIMBALANCE,
BMASKDWORD,
ofdmswing_table
[(u8) ofdm_index[1]]);
rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
BMASKH4BITS, 0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), 0x00);
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
val_x, val_y, ele_a, ele_c,
ele_d, val_x, val_y);
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
rtl_get_bbreg(hw, 0xc80, BMASKDWORD),
rtl_get_bbreg(hw, 0xc94, BMASKDWORD),
rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
BRFREGOFFSETMASK));
}
if ((delta_iqk > rtlefuse->delta_iqk) &&
(rtlefuse->delta_iqk != 0)) {
rtl92d_phy_reset_iqk_result(hw);
rtlpriv->dm.thermalvalue_iqk = thermalvalue;
rtl92d_phy_iq_calibrate(hw);
}
if (delta_rxgain > 0 && rtlhal->current_bandtype == BAND_ON_5G
&& thermalvalue <= rtlefuse->eeprom_thermalmeter) {
rtlpriv->dm.thermalvalue_rxgain = thermalvalue;
rtl92d_dm_rxgain_tracking_thermalmeter(hw);
}
if (rtlpriv->dm.txpower_track_control)
rtlpriv->dm.thermalvalue = thermalvalue;
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
}
static void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.txpower_tracking = true;
rtlpriv->dm.txpower_trackinginit = false;
rtlpriv->dm.txpower_track_control = true;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"pMgntInfo->txpower_tracking = %d\n",
rtlpriv->dm.txpower_tracking);
}
void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
static u8 tm_trigger;
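/* tm_trigger alternates across watchdog passes: the first pass arms
* the RF thermal meter, the next one reads it back and runs the
* tracking callback. */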
if (!rtlpriv->dm.txpower_tracking)
return;
if (!tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) |
BIT(16), 0x03);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Trigger 92S Thermal Meter!!\n");
tm_trigger = 1;
return;
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Schedule TxPowerTracking direct call!!\n");
rtl92d_dm_txpower_tracking_callback_thermalmeter(hw);
tm_trigger = 0;
}
}
void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rate_adaptive *ra = &(rtlpriv->ra);
ra->ratr_state = DM_RATR_STA_INIT;
ra->pre_ratr_state = DM_RATR_STA_INIT;
if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
rtlpriv->dm.useramask = true;
else
rtlpriv->dm.useramask = false;
}
void rtl92d_dm_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
rtl92d_dm_diginit(hw);
rtl92d_dm_init_dynamic_txpower(hw);
rtl92d_dm_init_edca_turbo(hw);
rtl92d_dm_init_rate_adaptive_mask(hw);
rtl92d_dm_initialize_txpower_tracking(hw);
}
void rtl92d_dm_watchdog(struct ieee80211_hw *hw)
{
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool fw_current_inpsmode = false;
bool fwps_awake = true;
/* Skip DM when:
* 1. RF is OFF. (No need to do DM.)
* 2. FW is in power-saving mode for FwLPS.
* (Prevents SW/FW I/O racing.)
* 3. An IPS workitem is scheduled. (Prevents the IPS sequence
* from being interleaved with DM.)
* 4. RFChangeInProgress is TRUE.
* (Prevents breakage by IPS/HW/SW RF off.) */
if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) &&
fwps_awake) && (!ppsc->rfchange_inprogress)) {
rtl92d_dm_pwdb_monitor(hw);
rtl92d_dm_false_alarm_counter_statistics(hw);
rtl92d_dm_find_minimum_rssi(hw);
rtl92d_dm_dig(hw);
/* rtl92d_dm_dynamic_bb_powersaving(hw); */
rtl92d_dm_dynamic_txpower(hw);
/* rtl92d_dm_check_txpower_tracking_thermal_meter(hw); */
/* rtl92d_dm_refresh_rate_adaptive_mask(hw); */
/* rtl92d_dm_interrupt_migration(hw); */
rtl92d_dm_check_edca_turbo(hw);
}
}
| gpl-2.0 |
jamesonwilliams/kernel_msm | drivers/net/wireless/rtlwifi/rtl8192de/dm.c | 4818 | 47624 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "../wifi.h"
#include "../base.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "dm.h"
#include "fw.h"
#define UNDEC_SM_PWDB entry_min_undecoratedsmoothed_pwdb
struct dig_t de_digtable;
static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
0x7f8001fe, /* 0, +6.0dB */
0x788001e2, /* 1, +5.5dB */
0x71c001c7, /* 2, +5.0dB */
0x6b8001ae, /* 3, +4.5dB */
0x65400195, /* 4, +4.0dB */
0x5fc0017f, /* 5, +3.5dB */
0x5a400169, /* 6, +3.0dB */
0x55400155, /* 7, +2.5dB */
0x50800142, /* 8, +2.0dB */
0x4c000130, /* 9, +1.5dB */
0x47c0011f, /* 10, +1.0dB */
0x43c0010f, /* 11, +0.5dB */
0x40000100, /* 12, +0dB */
0x3c8000f2, /* 13, -0.5dB */
0x390000e4, /* 14, -1.0dB */
0x35c000d7, /* 15, -1.5dB */
0x32c000cb, /* 16, -2.0dB */
0x300000c0, /* 17, -2.5dB */
0x2d4000b5, /* 18, -3.0dB */
0x2ac000ab, /* 19, -3.5dB */
0x288000a2, /* 20, -4.0dB */
0x26000098, /* 21, -4.5dB */
0x24000090, /* 22, -5.0dB */
0x22000088, /* 23, -5.5dB */
0x20000080, /* 24, -6.0dB */
0x1e400079, /* 25, -6.5dB */
0x1c800072, /* 26, -7.0dB */
0x1b00006c, /* 27, -7.5dB */
0x19800066, /* 28, -8.0dB */
0x18000060, /* 29, -8.5dB */
0x16c0005b, /* 30, -9.0dB */
0x15800056, /* 31, -9.5dB */
0x14400051, /* 32, -10.0dB */
0x1300004c, /* 33, -10.5dB */
0x12000048, /* 34, -11.0dB */
0x11000044, /* 35, -11.5dB */
0x10000040, /* 36, -12.0dB */
0x0f00003c, /* 37, -12.5dB */
0x0e400039, /* 38, -13.0dB */
0x0d800036, /* 39, -13.5dB */
0x0cc00033, /* 40, -14.0dB */
0x0c000030, /* 41, -14.5dB */
0x0b40002d, /* 42, -15.0dB */
};
static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
{0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
{0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
{0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
{0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
{0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
{0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
{0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
{0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
{0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
{0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
{0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
{0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
{0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
{0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
{0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
{0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
{0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
{0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
{0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
{0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
{0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
{0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
{0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
{0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
{0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
{0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
{0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
{0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
{0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
};
static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
{0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
{0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
{0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
{0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
{0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
{0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
{0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
{0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
{0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
{0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
{0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
{0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
{0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
{0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
{0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
{0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
{0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
{0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
{0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
{0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
{0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
{0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
{0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
{0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
{0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
{0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
{0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
};
static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
{
de_digtable.dig_enable_flag = true;
de_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
de_digtable.cur_igvalue = 0x20;
de_digtable.pre_igvalue = 0x0;
de_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
de_digtable.presta_connectstate = DIG_STA_DISCONNECT;
de_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
de_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
de_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
de_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
de_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
de_digtable.rx_gain_range_max = DM_DIG_FA_UPPER;
de_digtable.rx_gain_range_min = DM_DIG_FA_LOWER;
de_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
de_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
de_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
de_digtable.pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
de_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
de_digtable.large_fa_hit = 0;
de_digtable.recover_cnt = 0;
de_digtable.forbidden_igi = DM_DIG_FA_LOWER;
}
static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
u32 ret_value;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
unsigned long flag = 0;
/* hold ofdm counter */
rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */
rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /*hold page D counter */
ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, BMASKDWORD);
falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, BMASKDWORD);
falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, BMASKDWORD);
falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, BMASKDWORD);
falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
falsealm_cnt->cnt_rate_illegal +
falsealm_cnt->cnt_crc8_fail +
falsealm_cnt->cnt_mcs_fail +
falsealm_cnt->cnt_fast_fsync_fail +
falsealm_cnt->cnt_sb_search_fail;
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
/* hold cck counter */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, BMASKBYTE0);
falsealm_cnt->cnt_cck_fail = ret_value;
ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, BMASKBYTE3);
falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
} else {
falsealm_cnt->cnt_cck_fail = 0;
}
falsealm_cnt->cnt_all = falsealm_cnt->cnt_fast_fsync_fail +
falsealm_cnt->cnt_sb_search_fail +
falsealm_cnt->cnt_parity_fail +
falsealm_cnt->cnt_rate_illegal +
falsealm_cnt->cnt_crc8_fail +
falsealm_cnt->cnt_mcs_fail +
falsealm_cnt->cnt_cck_fail;
/* reset false alarm counter registers */
rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
/* update ofdm counter */
rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
/* update page C counter */
rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0);
/* update page D counter */
rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0);
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
/* reset cck counter */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
/* enable cck counter */
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n",
falsealm_cnt->cnt_fast_fsync_fail,
falsealm_cnt->cnt_sb_search_fail);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n",
falsealm_cnt->cnt_parity_fail,
falsealm_cnt->cnt_rate_illegal,
falsealm_cnt->cnt_crc8_fail,
falsealm_cnt->cnt_mcs_fail);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n",
falsealm_cnt->cnt_ofdm_fail,
falsealm_cnt->cnt_cck_fail,
falsealm_cnt->cnt_all);
}
static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtlpriv);
/* Determine the minimum RSSI */
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
de_digtable.min_undecorated_pwdb_for_dm = 0;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"Not connected to any\n");
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
de_digtable.min_undecorated_pwdb_for_dm =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"AP Client PWDB = 0x%lx\n",
rtlpriv->dm.UNDEC_SM_PWDB);
} else {
de_digtable.min_undecorated_pwdb_for_dm =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"STA Default Port PWDB = 0x%x\n",
de_digtable.min_undecorated_pwdb_for_dm);
}
} else {
de_digtable.min_undecorated_pwdb_for_dm =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
"AP Ext Port or disconnect PWDB = 0x%x\n",
de_digtable.min_undecorated_pwdb_for_dm);
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
de_digtable.min_undecorated_pwdb_for_dm);
}
static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
unsigned long flag = 0;
if (de_digtable.cursta_connectctate == DIG_STA_CONNECT) {
if (de_digtable.pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
if (de_digtable.min_undecorated_pwdb_for_dm <= 25)
de_digtable.cur_cck_pd_state =
CCK_PD_STAGE_LOWRSSI;
else
de_digtable.cur_cck_pd_state =
CCK_PD_STAGE_HIGHRSSI;
} else {
if (de_digtable.min_undecorated_pwdb_for_dm <= 20)
de_digtable.cur_cck_pd_state =
CCK_PD_STAGE_LOWRSSI;
else
de_digtable.cur_cck_pd_state =
CCK_PD_STAGE_HIGHRSSI;
}
} else {
de_digtable.cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
}
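/* The CCK packet-detection threshold written below (0x83 vs 0xcd in
* byte 2 of RCCK0_CCA) follows the low/high RSSI state chosen above. */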
if (de_digtable.pre_cck_pd_state != de_digtable.cur_cck_pd_state) {
if (de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
} else {
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
de_digtable.pre_cck_pd_state = de_digtable.cur_cck_pd_state;
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
de_digtable.cursta_connectctate == DIG_STA_CONNECT ?
"DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
"Low RSSI " : "High RSSI ");
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
}
void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
de_digtable.cur_igvalue, de_digtable.pre_igvalue,
de_digtable.backoff_val);
if (de_digtable.dig_enable_flag == false) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
de_digtable.pre_igvalue = 0x17;
return;
}
if (de_digtable.pre_igvalue != de_digtable.cur_igvalue) {
rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
de_digtable.cur_igvalue);
rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
de_digtable.cur_igvalue);
de_digtable.pre_igvalue = de_digtable.cur_igvalue;
}
}
static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
{
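/* Early mode is toggled with hysteresis against Cisco peers: off when
* the minimum PWDB drops below 50, back on once it climbs above 55;
* otherwise it is simply kept enabled. */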
if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
(rtlpriv->mac80211.vendor == PEER_CISCO)) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
if (de_digtable.last_min_undecorated_pwdb_for_dm >= 50
&& de_digtable.min_undecorated_pwdb_for_dm < 50) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Early Mode Off\n");
} else if (de_digtable.last_min_undecorated_pwdb_for_dm <= 55 &&
de_digtable.min_undecorated_pwdb_for_dm > 55) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"Early Mode On\n");
}
} else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n");
}
}
static void rtl92d_dm_dig(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 value_igi = de_digtable.cur_igvalue;
struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
if (rtlpriv->rtlhal.earlymode_enable) {
rtl92d_early_mode_enabled(rtlpriv);
de_digtable.last_min_undecorated_pwdb_for_dm =
de_digtable.min_undecorated_pwdb_for_dm;
}
if (!rtlpriv->dm.dm_initialgain_enable)
return;
/* Because we send data packets while scanning, returning here would
* lower WEP throughput with some APs (e.g. gear-3700); this is a
* difference between the mac80211 and the old ieee80211 driver. */
/* if (rtlpriv->mac80211.act_scanning)
* return; */
/* Only STA mode is handled; return otherwise */
if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
return;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
/* Decide the current status and if modify initial gain or not */
if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
de_digtable.cursta_connectctate = DIG_STA_CONNECT;
else
de_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
/* adjust initial gain according to false alarm counter */
if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
value_igi--;
else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH1)
value_igi += 0;
else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH2)
value_igi++;
else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2)
value_igi += 2;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
de_digtable.large_fa_hit, de_digtable.forbidden_igi);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG() Before: Recover_cnt=%d, rx_gain_range_min=%x\n",
de_digtable.recover_cnt, de_digtable.rx_gain_range_min);
/* deal with an abnormally large false alarm count */
if (falsealm_cnt->cnt_all > 10000) {
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG(): Abnormally false alarm case\n");
de_digtable.large_fa_hit++;
if (de_digtable.forbidden_igi < de_digtable.cur_igvalue) {
de_digtable.forbidden_igi = de_digtable.cur_igvalue;
de_digtable.large_fa_hit = 1;
}
if (de_digtable.large_fa_hit >= 3) {
if ((de_digtable.forbidden_igi + 1) > DM_DIG_MAX)
de_digtable.rx_gain_range_min = DM_DIG_MAX;
else
de_digtable.rx_gain_range_min =
(de_digtable.forbidden_igi + 1);
de_digtable.recover_cnt = 3600; /* 3600=2hr */
}
} else {
/* Recovery mechanism for IGI lower bound */
if (de_digtable.recover_cnt != 0) {
de_digtable.recover_cnt--;
} else {
if (de_digtable.large_fa_hit == 0) {
if ((de_digtable.forbidden_igi - 1) <
DM_DIG_FA_LOWER) {
de_digtable.forbidden_igi =
DM_DIG_FA_LOWER;
de_digtable.rx_gain_range_min =
DM_DIG_FA_LOWER;
} else {
de_digtable.forbidden_igi--;
de_digtable.rx_gain_range_min =
(de_digtable.forbidden_igi + 1);
}
} else if (de_digtable.large_fa_hit == 3) {
de_digtable.large_fa_hit = 0;
}
}
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
de_digtable.large_fa_hit, de_digtable.forbidden_igi);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
"dm_DIG() After: recover_cnt=%d, rx_gain_range_min=%x\n",
de_digtable.recover_cnt, de_digtable.rx_gain_range_min);
if (value_igi > DM_DIG_MAX)
value_igi = DM_DIG_MAX;
else if (value_igi < de_digtable.rx_gain_range_min)
value_igi = de_digtable.rx_gain_range_min;
de_digtable.cur_igvalue = value_igi;
rtl92d_dm_write_dig(hw);
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
rtl92d_dm_cck_packet_detection_thresh(hw);
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n");
}
static void rtl92d_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.dynamic_txpower_enable = true;
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
}
static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
long undecorated_smoothed_pwdb;
if ((!rtlpriv->dm.dynamic_txpower_enable)
|| rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
}
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
"Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undecorated_smoothed_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"IBSS Client PWDB = 0x%lx\n",
undecorated_smoothed_pwdb);
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.undecorated_smoothed_pwdb;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"STA Default Port PWDB = 0x%lx\n",
undecorated_smoothed_pwdb);
}
} else {
undecorated_smoothed_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"AP Ext Port PWDB = 0x%lx\n",
undecorated_smoothed_pwdb);
}
if (rtlhal->current_bandtype == BAND_ON_5G) {
if (undecorated_smoothed_pwdb >= 0x33) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
"5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n");
} else if ((undecorated_smoothed_pwdb < 0x33)
&& (undecorated_smoothed_pwdb >= 0x2b)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
"5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n");
} else if (undecorated_smoothed_pwdb < 0x2b) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
"5G:TxHighPwrLevel_Normal\n");
}
} else {
if (undecorated_smoothed_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else
if ((undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3))
&& (undecorated_smoothed_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undecorated_smoothed_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"TXHIGHPWRLEVEL_NORMAL\n");
}
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
"PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
}
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
static void rtl92d_dm_pwdb_monitor(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* AP & ADHOC & MESH will return tmp */
if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
return;
/* Indicate Rx signal strength to FW. */
if (rtlpriv->dm.useramask) {
u32 temp = rtlpriv->dm.undecorated_smoothed_pwdb;
temp <<= 16;
temp |= 0x100;
/* fw v12 cmdid 5: use max macid; for NIC,
* default macid is 0, max macid is 1 */
rtl92d_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, (u8 *) (&temp));
} else {
rtl_write_byte(rtlpriv, 0x4fe,
(u8) rtlpriv->dm.undecorated_smoothed_pwdb);
}
}
void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.current_turbo_edca = false;
rtlpriv->dm.is_any_nonbepkts = false;
rtlpriv->dm.is_cur_rdlstate = false;
}
static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
static u64 last_txok_cnt;
static u64 last_rxok_cnt;
u64 cur_txok_cnt;
u64 cur_rxok_cnt;
u32 edca_be_ul = 0x5ea42b;
u32 edca_be_dl = 0x5ea42b;
if (mac->link_state != MAC80211_LINKED) {
rtlpriv->dm.current_turbo_edca = false;
goto exit;
}
/* Enable BEQ TxOP limit configuration in wireless G-mode. */
/* Check whether we should force the TXOP configuration on. */
if ((!rtlpriv->dm.disable_framebursting) &&
(rtlpriv->sec.pairwise_enc_algorithm == WEP40_ENCRYPTION ||
rtlpriv->sec.pairwise_enc_algorithm == WEP104_ENCRYPTION ||
rtlpriv->sec.pairwise_enc_algorithm == TKIP_ENCRYPTION)) {
/* Force TxOP limit to 0x005e for UL. */
if (!(edca_be_ul & 0xffff0000))
edca_be_ul |= 0x005e0000;
/* Force TxOP limit to 0x005e for DL. */
if (!(edca_be_dl & 0xffff0000))
edca_be_dl |= 0x005e0000;
}
if ((!rtlpriv->dm.is_any_nonbepkts) &&
(!rtlpriv->dm.disable_framebursting)) {
cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
if (cur_rxok_cnt > 4 * cur_txok_cnt) {
if (!rtlpriv->dm.is_cur_rdlstate ||
!rtlpriv->dm.current_turbo_edca) {
rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
edca_be_dl);
rtlpriv->dm.is_cur_rdlstate = true;
}
} else {
if (rtlpriv->dm.is_cur_rdlstate ||
!rtlpriv->dm.current_turbo_edca) {
rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
edca_be_ul);
rtlpriv->dm.is_cur_rdlstate = false;
}
}
rtlpriv->dm.current_turbo_edca = true;
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
(u8 *) (&tmp));
rtlpriv->dm.current_turbo_edca = false;
}
}
exit:
rtlpriv->dm.is_any_nonbepkts = false;
last_txok_cnt = rtlpriv->stats.txbytesunicast;
last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
}
static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 index_mapping[RX_INDEX_MAPPING_NUM] = {
0x0f, 0x0f, 0x0d, 0x0c, 0x0b,
0x0a, 0x09, 0x08, 0x07, 0x06,
0x05, 0x04, 0x04, 0x03, 0x02
};
int i;
u32 u4tmp;
u4tmp = (index_mapping[(rtlpriv->efuse.eeprom_thermalmeter -
rtlpriv->dm.thermalvalue_rxgain)]) << 12;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"===> Rx Gain %x\n", u4tmp);
for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
rtl_set_rfreg(hw, i, 0x3C, BRFREGOFFSETMASK,
(rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp);
}
static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
u8 *cck_index_old)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int i;
unsigned long flag = 0;
long temp_cck;
/* Query CCK default setting From 0xa24 */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2,
BMASKDWORD) & BMASKCCK;
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
for (i = 0; i < CCK_TABLE_LENGTH; i++) {
if (rtlpriv->dm.cck_inch14) {
if (!memcmp((void *)&temp_cck,
(void *)&cckswing_table_ch14[i][2], 4)) {
*cck_index_old = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Initial reg0x%x = 0x%lx, cck_index=0x%x, ch 14 %d\n",
RCCK0_TXFILTER2, temp_cck,
*cck_index_old,
rtlpriv->dm.cck_inch14);
break;
}
} else {
if (!memcmp((void *) &temp_cck,
&cckswing_table_ch1ch13[i][2], 4)) {
*cck_index_old = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
RCCK0_TXFILTER2, temp_cck,
*cck_index_old,
rtlpriv->dm.cck_inch14);
break;
}
}
}
*temp_cckg = temp_cck;
}
static void rtl92d_bandtype_5G(struct rtl_hal *rtlhal, u8 *ofdm_index,
bool *internal_pa, u8 thermalvalue, u8 delta,
u8 rf, struct rtl_efuse *rtlefuse,
struct rtl_priv *rtlpriv, struct rtl_phy *rtlphy,
u8 index_mapping[5][INDEX_MAPPING_NUM],
u8 index_mapping_pa[8][INDEX_MAPPING_NUM])
{
int i;
u8 index;
u8 offset = 0;
for (i = 0; i < rf; i++) {
if (rtlhal->macphymode == DUALMAC_DUALPHY &&
rtlhal->interfaceindex == 1) /* MAC 1 5G */
*internal_pa = rtlefuse->internal_pa_5g[1];
else
*internal_pa = rtlefuse->internal_pa_5g[i];
if (*internal_pa) {
if (rtlhal->interfaceindex == 1 || i == rf)
offset = 4;
else
offset = 0;
if (rtlphy->current_channel >= 100 &&
rtlphy->current_channel <= 165)
offset += 2;
} else {
if (rtlhal->interfaceindex == 1 || i == rf)
offset = 2;
else
offset = 0;
}
if (thermalvalue > rtlefuse->eeprom_thermalmeter)
offset++;
if (*internal_pa) {
if (delta > INDEX_MAPPING_NUM - 1)
index = index_mapping_pa[offset]
[INDEX_MAPPING_NUM - 1];
else
index =
index_mapping_pa[offset][delta];
} else {
if (delta > INDEX_MAPPING_NUM - 1)
index =
index_mapping[offset][INDEX_MAPPING_NUM - 1];
else
index = index_mapping[offset][delta];
}
if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
if (*internal_pa && thermalvalue > 0x12) {
ofdm_index[i] = rtlpriv->dm.ofdm_index[i] -
((delta / 2) * 3 + (delta % 2));
} else {
ofdm_index[i] -= index;
}
} else {
ofdm_index[i] += index;
}
}
}
static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 thermalvalue, delta, delta_lck, delta_iqk, delta_rxgain;
u8 offset, thermalvalue_avg_count = 0;
u32 thermalvalue_avg = 0;
bool internal_pa = false;
long ele_a = 0, ele_d, temp_cck, val_x, value32;
long val_y, ele_c = 0;
u8 ofdm_index[2];
u8 cck_index = 0;
u8 ofdm_index_old[2];
u8 cck_index_old = 0;
u8 index;
int i;
bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
u8 ofdm_min_index = 6, ofdm_min_index_internal_pa = 3, rf;
u8 indexforchannel =
rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel);
u8 index_mapping[5][INDEX_MAPPING_NUM] = {
/* 5G, path A/MAC 0, decrease power */
{0, 1, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
/* 5G, path A/MAC 0, increase power */
{0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
/* 5G, path B/MAC 1, decrease power */
{0, 2, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
/* 5G, path B/MAC 1, increase power */
{0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
/* 2.4G, decrease power */
{0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10},
};
u8 index_mapping_internal_pa[8][INDEX_MAPPING_NUM] = {
/* 5G, path A/MAC 0, ch36-64, decrease power */
{0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
/* 5G, path A/MAC 0, ch36-64, increase power */
{0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
/* 5G, path A/MAC 0, ch100-165, decrease power */
{0, 1, 2, 3, 5, 6, 8, 10, 11, 13, 14, 15, 15},
/* 5G, path A/MAC 0, ch100-165, increase power */
{0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
/* 5G, path B/MAC 1, ch36-64, decrease power */
{0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
/* 5G, path B/MAC 1, ch36-64, increase power */
{0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
/* 5G, path B/MAC 1, ch100-165, decrease power */
{0, 1, 2, 3, 5, 6, 8, 9, 10, 12, 13, 14, 14},
/* 5G, path B/MAC 1, ch100-165, increase power */
{0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
};
rtlpriv->dm.txpower_trackinginit = true;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n");
thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
thermalvalue,
rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
rtl92d_phy_ap_calibrate(hw, (thermalvalue -
rtlefuse->eeprom_thermalmeter));
if (is2t)
rf = 2;
else
rf = 1;
if (thermalvalue) {
ele_d = rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
BMASKDWORD) & BMASKOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
if (ele_d == (ofdmswing_table[i] & BMASKOFDM_D)) {
ofdm_index_old[0] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
ROFDM0_XATxIQIMBALANCE,
ele_d, ofdm_index_old[0]);
break;
}
}
if (is2t) {
ele_d = rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
BMASKDWORD) & BMASKOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
if (ele_d ==
(ofdmswing_table[i] & BMASKOFDM_D)) {
ofdm_index_old[1] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
DBG_LOUD,
"Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
ROFDM0_XBTxIQIMBALANCE, ele_d,
ofdm_index_old[1]);
break;
}
}
}
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
rtl92d_bandtype_2_4G(hw, &temp_cck, &cck_index_old);
} else {
temp_cck = 0x090e1317;
cck_index_old = 12;
}
if (!rtlpriv->dm.thermalvalue) {
rtlpriv->dm.thermalvalue =
rtlefuse->eeprom_thermalmeter;
rtlpriv->dm.thermalvalue_lck = thermalvalue;
rtlpriv->dm.thermalvalue_iqk = thermalvalue;
rtlpriv->dm.thermalvalue_rxgain =
rtlefuse->eeprom_thermalmeter;
for (i = 0; i < rf; i++)
rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
rtlpriv->dm.cck_index = cck_index_old;
}
if (rtlhal->reloadtxpowerindex) {
for (i = 0; i < rf; i++)
rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
rtlpriv->dm.cck_index = cck_index_old;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"reload ofdm index for band switch\n");
}
rtlpriv->dm.thermalvalue_avg
[rtlpriv->dm.thermalvalue_avg_index] = thermalvalue;
rtlpriv->dm.thermalvalue_avg_index++;
if (rtlpriv->dm.thermalvalue_avg_index == AVG_THERMAL_NUM)
rtlpriv->dm.thermalvalue_avg_index = 0;
for (i = 0; i < AVG_THERMAL_NUM; i++) {
if (rtlpriv->dm.thermalvalue_avg[i]) {
thermalvalue_avg +=
rtlpriv->dm.thermalvalue_avg[i];
thermalvalue_avg_count++;
}
}
if (thermalvalue_avg_count)
thermalvalue = (u8) (thermalvalue_avg /
thermalvalue_avg_count);
if (rtlhal->reloadtxpowerindex) {
delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
(thermalvalue - rtlefuse->eeprom_thermalmeter) :
(rtlefuse->eeprom_thermalmeter - thermalvalue);
rtlhal->reloadtxpowerindex = false;
rtlpriv->dm.done_txpower = false;
} else if (rtlpriv->dm.done_txpower) {
delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
(thermalvalue - rtlpriv->dm.thermalvalue) :
(rtlpriv->dm.thermalvalue - thermalvalue);
} else {
delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
(thermalvalue - rtlefuse->eeprom_thermalmeter) :
(rtlefuse->eeprom_thermalmeter - thermalvalue);
}
delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
(thermalvalue - rtlpriv->dm.thermalvalue_lck) :
(rtlpriv->dm.thermalvalue_lck - thermalvalue);
delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
(thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
(rtlpriv->dm.thermalvalue_iqk - thermalvalue);
delta_rxgain =
(thermalvalue > rtlpriv->dm.thermalvalue_rxgain) ?
(thermalvalue - rtlpriv->dm.thermalvalue_rxgain) :
(rtlpriv->dm.thermalvalue_rxgain - thermalvalue);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
thermalvalue, rtlpriv->dm.thermalvalue,
rtlefuse->eeprom_thermalmeter, delta, delta_lck,
delta_iqk);
if ((delta_lck > rtlefuse->delta_lck) &&
(rtlefuse->delta_lck != 0)) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
rtl92d_phy_lc_calibrate(hw);
}
if (delta > 0 && rtlpriv->dm.txpower_track_control) {
rtlpriv->dm.done_txpower = true;
delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
(thermalvalue - rtlefuse->eeprom_thermalmeter) :
(rtlefuse->eeprom_thermalmeter - thermalvalue);
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
offset = 4;
if (delta > INDEX_MAPPING_NUM - 1)
index = index_mapping[offset]
[INDEX_MAPPING_NUM - 1];
else
index = index_mapping[offset][delta];
if (thermalvalue > rtlpriv->dm.thermalvalue) {
for (i = 0; i < rf; i++)
ofdm_index[i] -= delta;
cck_index -= delta;
} else {
for (i = 0; i < rf; i++)
ofdm_index[i] += index;
cck_index += index;
}
} else if (rtlhal->current_bandtype == BAND_ON_5G) {
rtl92d_bandtype_5G(rtlhal, ofdm_index,
&internal_pa, thermalvalue,
delta, rf, rtlefuse, rtlpriv,
rtlphy, index_mapping,
index_mapping_internal_pa);
}
if (is2t) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x,cck_index=0x%x\n",
rtlpriv->dm.ofdm_index[0],
rtlpriv->dm.ofdm_index[1],
rtlpriv->dm.cck_index);
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"temp OFDM_A_index=0x%x,cck_index = 0x%x\n",
rtlpriv->dm.ofdm_index[0],
rtlpriv->dm.cck_index);
}
for (i = 0; i < rf; i++) {
if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1)
ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1;
else if (internal_pa ||
rtlhal->current_bandtype == BAND_ON_2_4G) {
if (ofdm_index[i] < ofdm_min_index_internal_pa)
ofdm_index[i] = ofdm_min_index_internal_pa;
} else if (ofdm_index[i] < ofdm_min_index) {
ofdm_index[i] = ofdm_min_index;
}
}
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
if (cck_index > CCK_TABLE_SIZE - 1)
cck_index = CCK_TABLE_SIZE - 1;
else if (cck_index < 0)
cck_index = 0;
}
if (is2t) {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
ofdm_index[0], ofdm_index[1],
cck_index);
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"new OFDM_A_index=0x%x,cck_index = 0x%x\n",
ofdm_index[0], cck_index);
}
ele_d = (ofdmswing_table[(u8) ofdm_index[0]] &
0xFFC00000) >> 22;
val_x = rtlphy->iqk_matrix_regsetting
[indexforchannel].value[0][0];
val_y = rtlphy->iqk_matrix_regsetting
[indexforchannel].value[0][1];
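/*
* val_x and val_y appear to hold 10-bit signed IQK results (bit 9 is the
* sign bit), so the OR with 0xFFFFFC00 below sign-extends them to 32 bits;
* e.g. 0x200 becomes 0xFFFFFE00 (-512).
*/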
if (val_x != 0) {
if ((val_x & 0x00000200) != 0)
val_x = val_x | 0xFFFFFC00;
ele_a =
((val_x * ele_d) >> 8) & 0x000003FF;
/* new element C = element D x Y */
if ((val_y & 0x00000200) != 0)
val_y = val_y | 0xFFFFFC00;
ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
/* write new elements A, C, D to regC80 and
* regC94, element B is always 0 */
value32 = (ele_d << 22) | ((ele_c & 0x3F) <<
16) | ele_a;
rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
BMASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
value32);
} else {
rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
BMASKDWORD,
ofdmswing_table
[(u8)ofdm_index[0]]);
rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(24), 0x00);
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
rtlhal->interfaceindex,
val_x, val_y, ele_a, ele_c, ele_d,
val_x, val_y);
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* Adjust CCK according to IQK result */
if (!rtlpriv->dm.cck_inch14) {
rtl_write_byte(rtlpriv, 0xa22,
cckswing_table_ch1ch13
[(u8)cck_index][0]);
rtl_write_byte(rtlpriv, 0xa23,
cckswing_table_ch1ch13
[(u8)cck_index][1]);
rtl_write_byte(rtlpriv, 0xa24,
cckswing_table_ch1ch13
[(u8)cck_index][2]);
rtl_write_byte(rtlpriv, 0xa25,
cckswing_table_ch1ch13
[(u8)cck_index][3]);
rtl_write_byte(rtlpriv, 0xa26,
cckswing_table_ch1ch13
[(u8)cck_index][4]);
rtl_write_byte(rtlpriv, 0xa27,
cckswing_table_ch1ch13
[(u8)cck_index][5]);
rtl_write_byte(rtlpriv, 0xa28,
cckswing_table_ch1ch13
[(u8)cck_index][6]);
rtl_write_byte(rtlpriv, 0xa29,
cckswing_table_ch1ch13
[(u8)cck_index][7]);
} else {
rtl_write_byte(rtlpriv, 0xa22,
cckswing_table_ch14
[(u8)cck_index][0]);
rtl_write_byte(rtlpriv, 0xa23,
cckswing_table_ch14
[(u8)cck_index][1]);
rtl_write_byte(rtlpriv, 0xa24,
cckswing_table_ch14
[(u8)cck_index][2]);
rtl_write_byte(rtlpriv, 0xa25,
cckswing_table_ch14
[(u8)cck_index][3]);
rtl_write_byte(rtlpriv, 0xa26,
cckswing_table_ch14
[(u8)cck_index][4]);
rtl_write_byte(rtlpriv, 0xa27,
cckswing_table_ch14
[(u8)cck_index][5]);
rtl_write_byte(rtlpriv, 0xa28,
cckswing_table_ch14
[(u8)cck_index][6]);
rtl_write_byte(rtlpriv, 0xa29,
cckswing_table_ch14
[(u8)cck_index][7]);
}
}
if (is2t) {
ele_d = (ofdmswing_table[(u8) ofdm_index[1]] &
0xFFC00000) >> 22;
val_x = rtlphy->iqk_matrix_regsetting
[indexforchannel].value[0][4];
val_y = rtlphy->iqk_matrix_regsetting
[indexforchannel].value[0][5];
if (val_x != 0) {
if ((val_x & 0x00000200) != 0)
/* consider minus */
val_x = val_x | 0xFFFFFC00;
ele_a = ((val_x * ele_d) >> 8) &
0x000003FF;
/* new element C = element D x Y */
if ((val_y & 0x00000200) != 0)
val_y =
val_y | 0xFFFFFC00;
ele_c =
((val_y *
ele_d) >> 8) & 0x00003FF;
/* write new elements A, C, D to regC88
* and regC9C, element B is always 0
*/
value32 = (ele_d << 22) |
((ele_c & 0x3F) << 16) |
ele_a;
rtl_set_bbreg(hw,
ROFDM0_XBTxIQIMBALANCE,
BMASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
BMASKH4BITS, value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), value32);
} else {
rtl_set_bbreg(hw,
ROFDM0_XBTxIQIMBALANCE,
BMASKDWORD,
ofdmswing_table
[(u8) ofdm_index[1]]);
rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
BMASKH4BITS, 0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), 0x00);
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
val_x, val_y, ele_a, ele_c,
ele_d, val_x, val_y);
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
rtl_get_bbreg(hw, 0xc80, BMASKDWORD),
rtl_get_bbreg(hw, 0xc94, BMASKDWORD),
rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
BRFREGOFFSETMASK));
}
if ((delta_iqk > rtlefuse->delta_iqk) &&
(rtlefuse->delta_iqk != 0)) {
rtl92d_phy_reset_iqk_result(hw);
rtlpriv->dm.thermalvalue_iqk = thermalvalue;
rtl92d_phy_iq_calibrate(hw);
}
if (delta_rxgain > 0 && rtlhal->current_bandtype == BAND_ON_5G
&& thermalvalue <= rtlefuse->eeprom_thermalmeter) {
rtlpriv->dm.thermalvalue_rxgain = thermalvalue;
rtl92d_dm_rxgain_tracking_thermalmeter(hw);
}
if (rtlpriv->dm.txpower_track_control)
rtlpriv->dm.thermalvalue = thermalvalue;
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
}
static void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.txpower_tracking = true;
rtlpriv->dm.txpower_trackinginit = false;
rtlpriv->dm.txpower_track_control = true;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"pMgntInfo->txpower_tracking = %d\n",
rtlpriv->dm.txpower_tracking);
}
void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
static u8 tm_trigger;
if (!rtlpriv->dm.txpower_tracking)
return;
if (!tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) |
BIT(16), 0x03);
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Trigger 92S Thermal Meter!!\n");
tm_trigger = 1;
return;
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Schedule TxPowerTracking direct call!!\n");
rtl92d_dm_txpower_tracking_callback_thermalmeter(hw);
tm_trigger = 0;
}
}
void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rate_adaptive *ra = &(rtlpriv->ra);
ra->ratr_state = DM_RATR_STA_INIT;
ra->pre_ratr_state = DM_RATR_STA_INIT;
if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
rtlpriv->dm.useramask = true;
else
rtlpriv->dm.useramask = false;
}
void rtl92d_dm_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
rtl92d_dm_diginit(hw);
rtl92d_dm_init_dynamic_txpower(hw);
rtl92d_dm_init_edca_turbo(hw);
rtl92d_dm_init_rate_adaptive_mask(hw);
rtl92d_dm_initialize_txpower_tracking(hw);
}
void rtl92d_dm_watchdog(struct ieee80211_hw *hw)
{
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool fw_current_inpsmode = false;
bool fwps_awake = true;
/* 1. RF is OFF. (No need to do DM.)
* 2. Fw is under power saving mode for FwLPS.
* (Prevent from SW/FW I/O racing.)
* 3. IPS workitem is scheduled. (Prevent from IPS sequence
* to be swapped with DM.
* 4. RFChangeInProgress is TRUE.
* (Prevent from broken by IPS/HW/SW Rf off.) */
if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) &&
fwps_awake) && (!ppsc->rfchange_inprogress)) {
rtl92d_dm_pwdb_monitor(hw);
rtl92d_dm_false_alarm_counter_statistics(hw);
rtl92d_dm_find_minimum_rssi(hw);
rtl92d_dm_dig(hw);
/* rtl92d_dm_dynamic_bb_powersaving(hw); */
rtl92d_dm_dynamic_txpower(hw);
/* rtl92d_dm_check_txpower_tracking_thermal_meter(hw); */
/* rtl92d_dm_refresh_rate_adaptive_mask(hw); */
/* rtl92d_dm_interrupt_migration(hw); */
rtl92d_dm_check_edca_turbo(hw);
}
}
| gpl-2.0 |
n3ocort3x/one_x_2.6 | drivers/isdn/mISDN/fsm.c | 5074 | 4430 | /*
* finite state machine implementation
*
* Author Karsten Keil <kkeil@novell.com>
*
* Thanks to Jan den Ouden
* Fritz Elfert
* Copyright 2008 by Karsten Keil <kkeil@novell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include "fsm.h"
#define FSM_TIMER_DEBUG 0
void
mISDN_FsmNew(struct Fsm *fsm,
struct FsmNode *fnlist, int fncount)
{
int i;
fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count *
fsm->event_count, GFP_KERNEL);
for (i = 0; i < fncount; i++)
if ((fnlist[i].state >= fsm->state_count) ||
(fnlist[i].event >= fsm->event_count)) {
printk(KERN_ERR
"mISDN_FsmNew Error: %d st(%ld/%ld) ev(%ld/%ld)\n",
i, (long)fnlist[i].state, (long)fsm->state_count,
(long)fnlist[i].event, (long)fsm->event_count);
} else
fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
fnlist[i].state] = (FSMFNPTR) fnlist[i].routine;
}
EXPORT_SYMBOL(mISDN_FsmNew);
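/*
* Sketch of the jumpmatrix layout (hypothetical FSM with 3 states and
* 4 events, for illustration only): the table is a flat array of
* state_count * event_count function pointers, indexed as
*
*	fn = fsm->jumpmatrix[fsm->state_count * event + state];
*
* so event 2 / state 1 lands in slot 3 * 2 + 1 = 7. mISDN_FsmEvent()
* below performs exactly this lookup.
*/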
void
mISDN_FsmFree(struct Fsm *fsm)
{
kfree((void *) fsm->jumpmatrix);
}
EXPORT_SYMBOL(mISDN_FsmFree);
int
mISDN_FsmEvent(struct FsmInst *fi, int event, void *arg)
{
FSMFNPTR r;
if ((fi->state >= fi->fsm->state_count) ||
(event >= fi->fsm->event_count)) {
printk(KERN_ERR
"mISDN_FsmEvent Error st(%ld/%ld) ev(%d/%ld)\n",
(long)fi->state, (long)fi->fsm->state_count, event,
(long)fi->fsm->event_count);
return 1;
}
r = fi->fsm->jumpmatrix[fi->fsm->state_count * event + fi->state];
if (r) {
if (fi->debug)
fi->printdebug(fi, "State %s Event %s",
fi->fsm->strState[fi->state],
fi->fsm->strEvent[event]);
r(fi, event, arg);
return 0;
} else {
if (fi->debug)
fi->printdebug(fi, "State %s Event %s no action",
fi->fsm->strState[fi->state],
fi->fsm->strEvent[event]);
return 1;
}
}
EXPORT_SYMBOL(mISDN_FsmEvent);
void
mISDN_FsmChangeState(struct FsmInst *fi, int newstate)
{
fi->state = newstate;
if (fi->debug)
fi->printdebug(fi, "ChangeState %s",
fi->fsm->strState[newstate]);
}
EXPORT_SYMBOL(mISDN_FsmChangeState);
static void
FsmExpireTimer(struct FsmTimer *ft)
{
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft);
#endif
mISDN_FsmEvent(ft->fi, ft->event, ft->arg);
}
void
mISDN_FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft)
{
ft->fi = fi;
ft->tl.function = (void *) FsmExpireTimer;
ft->tl.data = (long) ft;
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "mISDN_FsmInitTimer %lx", (long) ft);
#endif
init_timer(&ft->tl);
}
EXPORT_SYMBOL(mISDN_FsmInitTimer);
void
mISDN_FsmDelTimer(struct FsmTimer *ft, int where)
{
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "mISDN_FsmDelTimer %lx %d",
(long) ft, where);
#endif
del_timer(&ft->tl);
}
EXPORT_SYMBOL(mISDN_FsmDelTimer);
int
mISDN_FsmAddTimer(struct FsmTimer *ft,
int millisec, int event, void *arg, int where)
{
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "mISDN_FsmAddTimer %lx %d %d",
(long) ft, millisec, where);
#endif
if (timer_pending(&ft->tl)) {
if (ft->fi->debug) {
printk(KERN_WARNING
"mISDN_FsmAddTimer: timer already active!\n");
ft->fi->printdebug(ft->fi,
"mISDN_FsmAddTimer already active!");
}
return -1;
}
init_timer(&ft->tl);
ft->event = event;
ft->arg = arg;
ft->tl.expires = jiffies + (millisec * HZ) / 1000;
add_timer(&ft->tl);
return 0;
}
EXPORT_SYMBOL(mISDN_FsmAddTimer);
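/*
* Minimal usage sketch (hypothetical caller, not part of this file;
* EV_TIMEOUT is an assumed driver-defined event number):
*
*	struct FsmInst fi;	(initialised elsewhere with fsm, state, debug)
*	struct FsmTimer timer;
*
*	mISDN_FsmInitTimer(&fi, &timer);
*	mISDN_FsmAddTimer(&timer, 100, EV_TIMEOUT, NULL, __LINE__);
*		fires EV_TIMEOUT on fi after ~100 ms
*	mISDN_FsmDelTimer(&timer, __LINE__);
*
* The "where" argument is only used for debug output when FSM_TIMER_DEBUG
* is enabled.
*/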
void
mISDN_FsmRestartTimer(struct FsmTimer *ft,
int millisec, int event, void *arg, int where)
{
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "mISDN_FsmRestartTimer %lx %d %d",
(long) ft, millisec, where);
#endif
if (timer_pending(&ft->tl))
del_timer(&ft->tl);
init_timer(&ft->tl);
ft->event = event;
ft->arg = arg;
ft->tl.expires = jiffies + (millisec * HZ) / 1000;
add_timer(&ft->tl);
}
EXPORT_SYMBOL(mISDN_FsmRestartTimer);
| gpl-2.0 |
bbelos/rk3188-kernel | fs/ufs/truncate.c | 11474 | 13024 | /*
* linux/fs/ufs/truncate.c
*
* Copyright (C) 1998
* Daniel Pirkl <daniel.pirkl@email.cz>
* Charles University, Faculty of Mathematics and Physics
*
* from
*
* linux/fs/ext2/truncate.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/truncate.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
*/
/*
* Real random numbers for secure rm added 94/02/18
* Idea from Pierre del Perugia <delperug@gla.ecoledoc.ibp.fr>
*/
/*
* Adaptation to use page cache and UFS2 write support by
* Evgeniy Dushistov <dushistov@mail.ru>, 2006-2007
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"
/*
* Secure deletion currently doesn't work. It interacts very badly
* with buffers shared with memory mappings, and for that reason
* can't be done in the truncate() routines. It should instead be
* done separately in "release()" before calling the truncate routines
* that will release the actual file blocks.
*
* Linus
*/
#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
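/*
* Worked example (assuming a common UFS layout with s_bsize = 8192,
* s_bshift = 13, s_fsize = 1024, s_fshift = 10): for i_size = 10000,
* DIRECT_BLOCK = (10000 + 8191) >> 13 = 2 and
* DIRECT_FRAGMENT = (10000 + 1023) >> 10 = 10, i.e. both macros are
* round-up divisions of i_size by the block and fragment sizes.
*/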
static int ufs_trunc_direct(struct inode *inode)
{
struct ufs_inode_info *ufsi = UFS_I(inode);
struct super_block * sb;
struct ufs_sb_private_info * uspi;
void *p;
u64 frag1, frag2, frag3, frag4, block1, block2;
unsigned frag_to_free, free_count;
unsigned i, tmp;
int retry;
UFSD("ENTER: ino %lu\n", inode->i_ino);
sb = inode->i_sb;
uspi = UFS_SB(sb)->s_uspi;
frag_to_free = 0;
free_count = 0;
retry = 0;
frag1 = DIRECT_FRAGMENT;
frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
frag3 = frag4 & ~uspi->s_fpbmask;
block1 = block2 = 0;
if (frag2 > frag3) {
frag2 = frag4;
frag3 = frag4 = 0;
} else if (frag2 < frag3) {
block1 = ufs_fragstoblks (frag2);
block2 = ufs_fragstoblks (frag3);
}
UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
" frag3 %llu, frag4 %llu\n", inode->i_ino,
(unsigned long long)frag1, (unsigned long long)frag2,
(unsigned long long)block1, (unsigned long long)block2,
(unsigned long long)frag3, (unsigned long long)frag4);
if (frag1 >= frag2)
goto next1;
/*
* Free first free fragments
*/
p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
tmp = ufs_data_ptr_to_cpu(sb, p);
if (!tmp )
ufs_panic (sb, "ufs_trunc_direct", "internal error");
frag2 -= frag1;
frag1 = ufs_fragnum (frag1);
ufs_free_fragments(inode, tmp + frag1, frag2);
mark_inode_dirty(inode);
frag_to_free = tmp + frag1;
next1:
/*
* Free whole blocks
*/
for (i = block1 ; i < block2; i++) {
p = ufs_get_direct_data_ptr(uspi, ufsi, i);
tmp = ufs_data_ptr_to_cpu(sb, p);
if (!tmp)
continue;
ufs_data_ptr_clear(uspi, p);
if (free_count == 0) {
frag_to_free = tmp;
free_count = uspi->s_fpb;
} else if (free_count > 0 && frag_to_free == tmp - free_count)
free_count += uspi->s_fpb;
else {
ufs_free_blocks (inode, frag_to_free, free_count);
frag_to_free = tmp;
free_count = uspi->s_fpb;
}
mark_inode_dirty(inode);
}
if (free_count > 0)
ufs_free_blocks (inode, frag_to_free, free_count);
if (frag3 >= frag4)
goto next3;
/*
* Free last free fragments
*/
p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
tmp = ufs_data_ptr_to_cpu(sb, p);
if (!tmp )
ufs_panic(sb, "ufs_truncate_direct", "internal error");
frag4 = ufs_fragnum (frag4);
ufs_data_ptr_clear(uspi, p);
ufs_free_fragments (inode, tmp, frag4);
mark_inode_dirty(inode);
next3:
UFSD("EXIT: ino %lu\n", inode->i_ino);
return retry;
}
static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p)
{
struct super_block * sb;
struct ufs_sb_private_info * uspi;
struct ufs_buffer_head * ind_ubh;
void *ind;
u64 tmp, indirect_block, i, frag_to_free;
unsigned free_count;
int retry;
UFSD("ENTER: ino %lu, offset %llu, p: %p\n",
inode->i_ino, (unsigned long long)offset, p);
BUG_ON(!p);
sb = inode->i_sb;
uspi = UFS_SB(sb)->s_uspi;
frag_to_free = 0;
free_count = 0;
retry = 0;
tmp = ufs_data_ptr_to_cpu(sb, p);
if (!tmp)
return 0;
ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize);
if (tmp != ufs_data_ptr_to_cpu(sb, p)) {
ubh_brelse (ind_ubh);
return 1;
}
if (!ind_ubh) {
ufs_data_ptr_clear(uspi, p);
return 0;
}
indirect_block = (DIRECT_BLOCK > offset) ? (DIRECT_BLOCK - offset) : 0;
for (i = indirect_block; i < uspi->s_apb; i++) {
ind = ubh_get_data_ptr(uspi, ind_ubh, i);
tmp = ufs_data_ptr_to_cpu(sb, ind);
if (!tmp)
continue;
ufs_data_ptr_clear(uspi, ind);
ubh_mark_buffer_dirty(ind_ubh);
if (free_count == 0) {
frag_to_free = tmp;
free_count = uspi->s_fpb;
} else if (free_count > 0 && frag_to_free == tmp - free_count)
free_count += uspi->s_fpb;
else {
ufs_free_blocks (inode, frag_to_free, free_count);
frag_to_free = tmp;
free_count = uspi->s_fpb;
}
mark_inode_dirty(inode);
}
if (free_count > 0) {
ufs_free_blocks (inode, frag_to_free, free_count);
}
for (i = 0; i < uspi->s_apb; i++)
if (!ufs_is_data_ptr_zero(uspi,
ubh_get_data_ptr(uspi, ind_ubh, i)))
break;
if (i >= uspi->s_apb) {
tmp = ufs_data_ptr_to_cpu(sb, p);
ufs_data_ptr_clear(uspi, p);
ufs_free_blocks (inode, tmp, uspi->s_fpb);
mark_inode_dirty(inode);
ubh_bforget(ind_ubh);
ind_ubh = NULL;
}
if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh))
ubh_sync_block(ind_ubh);
ubh_brelse (ind_ubh);
UFSD("EXIT: ino %lu\n", inode->i_ino);
return retry;
}
static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p)
{
struct super_block * sb;
struct ufs_sb_private_info * uspi;
struct ufs_buffer_head *dind_bh;
u64 i, tmp, dindirect_block;
void *dind;
int retry = 0;
UFSD("ENTER: ino %lu\n", inode->i_ino);
sb = inode->i_sb;
uspi = UFS_SB(sb)->s_uspi;
dindirect_block = (DIRECT_BLOCK > offset)
? ((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0;
retry = 0;
tmp = ufs_data_ptr_to_cpu(sb, p);
if (!tmp)
return 0;
dind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
if (tmp != ufs_data_ptr_to_cpu(sb, p)) {
ubh_brelse (dind_bh);
return 1;
}
if (!dind_bh) {
ufs_data_ptr_clear(uspi, p);
return 0;
}
for (i = dindirect_block ; i < uspi->s_apb ; i++) {
dind = ubh_get_data_ptr(uspi, dind_bh, i);
tmp = ufs_data_ptr_to_cpu(sb, dind);
if (!tmp)
continue;
retry |= ufs_trunc_indirect (inode, offset + (i << uspi->s_apbshift), dind);
ubh_mark_buffer_dirty(dind_bh);
}
for (i = 0; i < uspi->s_apb; i++)
if (!ufs_is_data_ptr_zero(uspi,
ubh_get_data_ptr(uspi, dind_bh, i)))
break;
if (i >= uspi->s_apb) {
tmp = ufs_data_ptr_to_cpu(sb, p);
ufs_data_ptr_clear(uspi, p);
ufs_free_blocks(inode, tmp, uspi->s_fpb);
mark_inode_dirty(inode);
ubh_bforget(dind_bh);
dind_bh = NULL;
}
if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh))
ubh_sync_block(dind_bh);
ubh_brelse (dind_bh);
UFSD("EXIT: ino %lu\n", inode->i_ino);
return retry;
}
static int ufs_trunc_tindirect(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
struct ufs_inode_info *ufsi = UFS_I(inode);
struct ufs_buffer_head * tind_bh;
u64 tindirect_block, tmp, i;
void *tind, *p;
int retry;
UFSD("ENTER: ino %lu\n", inode->i_ino);
retry = 0;
tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb))
? ((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift) : 0;
p = ufs_get_direct_data_ptr(uspi, ufsi, UFS_TIND_BLOCK);
if (!(tmp = ufs_data_ptr_to_cpu(sb, p)))
return 0;
tind_bh = ubh_bread (sb, tmp, uspi->s_bsize);
if (tmp != ufs_data_ptr_to_cpu(sb, p)) {
ubh_brelse (tind_bh);
return 1;
}
if (!tind_bh) {
ufs_data_ptr_clear(uspi, p);
return 0;
}
for (i = tindirect_block ; i < uspi->s_apb ; i++) {
tind = ubh_get_data_ptr(uspi, tind_bh, i);
retry |= ufs_trunc_dindirect(inode, UFS_NDADDR +
uspi->s_apb + ((i + 1) << uspi->s_2apbshift), tind);
ubh_mark_buffer_dirty(tind_bh);
}
for (i = 0; i < uspi->s_apb; i++)
if (!ufs_is_data_ptr_zero(uspi,
ubh_get_data_ptr(uspi, tind_bh, i)))
break;
if (i >= uspi->s_apb) {
tmp = ufs_data_ptr_to_cpu(sb, p);
ufs_data_ptr_clear(uspi, p);
ufs_free_blocks(inode, tmp, uspi->s_fpb);
mark_inode_dirty(inode);
ubh_bforget(tind_bh);
tind_bh = NULL;
}
if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh))
ubh_sync_block(tind_bh);
ubh_brelse (tind_bh);
UFSD("EXIT: ino %lu\n", inode->i_ino);
return retry;
}
static int ufs_alloc_lastblock(struct inode *inode)
{
int err = 0;
struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
unsigned i, end;
sector_t lastfrag;
struct page *lastpage;
struct buffer_head *bh;
u64 phys64;
lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift;
if (!lastfrag)
goto out;
lastfrag--;
lastpage = ufs_get_locked_page(mapping, lastfrag >>
(PAGE_CACHE_SHIFT - inode->i_blkbits));
if (IS_ERR(lastpage)) {
err = -EIO;
goto out;
}
end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
bh = page_buffers(lastpage);
for (i = 0; i < end; ++i)
bh = bh->b_this_page;
err = ufs_getfrag_block(inode, lastfrag, bh, 1);
if (unlikely(err))
goto out_unlock;
if (buffer_new(bh)) {
clear_buffer_new(bh);
unmap_underlying_metadata(bh->b_bdev,
bh->b_blocknr);
/*
* we do not zero the fragment, because if it is
* mapped to a hole it already contains zeroes
*/
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
set_page_dirty(lastpage);
}
if (lastfrag >= UFS_IND_FRAGMENT) {
end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
phys64 = bh->b_blocknr + 1;
for (i = 0; i < end; ++i) {
bh = sb_getblk(sb, i + phys64);
lock_buffer(bh);
memset(bh->b_data, 0, sb->s_blocksize);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
unlock_buffer(bh);
sync_dirty_buffer(bh);
brelse(bh);
}
}
out_unlock:
ufs_put_locked_page(lastpage);
out:
return err;
}
int ufs_truncate(struct inode *inode, loff_t old_i_size)
{
struct ufs_inode_info *ufsi = UFS_I(inode);
struct super_block *sb = inode->i_sb;
struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
int retry, err = 0;
UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
inode->i_ino, (unsigned long long)i_size_read(inode),
(unsigned long long)old_i_size);
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
return -EINVAL;
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
return -EPERM;
err = ufs_alloc_lastblock(inode);
if (err) {
i_size_write(inode, old_i_size);
goto out;
}
block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block);
while (1) {
retry = ufs_trunc_direct(inode);
retry |= ufs_trunc_indirect(inode, UFS_IND_BLOCK,
ufs_get_direct_data_ptr(uspi, ufsi,
UFS_IND_BLOCK));
retry |= ufs_trunc_dindirect(inode, UFS_IND_BLOCK + uspi->s_apb,
ufs_get_direct_data_ptr(uspi, ufsi,
UFS_DIND_BLOCK));
retry |= ufs_trunc_tindirect (inode);
if (!retry)
break;
if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
ufs_sync_inode (inode);
yield();
}
inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
ufsi->i_lastfrag = DIRECT_FRAGMENT;
mark_inode_dirty(inode);
out:
UFSD("EXIT: err %d\n", err);
return err;
}
int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
unsigned int ia_valid = attr->ia_valid;
int error;
error = inode_change_ok(inode, attr);
if (error)
return error;
if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
loff_t old_i_size = inode->i_size;
/* XXX(truncate): truncate_setsize should be called last */
truncate_setsize(inode, attr->ia_size);
lock_ufs(inode->i_sb);
error = ufs_truncate(inode, old_i_size);
unlock_ufs(inode->i_sb);
if (error)
return error;
}
setattr_copy(inode, attr);
mark_inode_dirty(inode);
return 0;
}
const struct inode_operations ufs_file_inode_operations = {
.setattr = ufs_setattr,
};
| gpl-2.0 |
davidepianca98/android_kernel_samsung_i9105P | arch/alpha/kernel/console.c | 11474 | 2057 | /*
* linux/arch/alpha/kernel/console.c
*
* Architecture-specific support for VGA device on
* non-0 I/O hose
*/
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/vt.h>
#include <asm/vga.h>
#include <asm/machvec.h>
#include "pci_impl.h"
#ifdef CONFIG_VGA_HOSE
struct pci_controller *pci_vga_hose;
static struct resource alpha_vga = {
.name = "alpha-vga+",
.start = 0x3C0,
.end = 0x3DF
};
static struct pci_controller * __init
default_vga_hose_select(struct pci_controller *h1, struct pci_controller *h2)
{
if (h2->index < h1->index)
return h2;
return h1;
}
void __init
locate_and_init_vga(void *(*sel_func)(void *, void *))
{
struct pci_controller *hose = NULL;
struct pci_dev *dev = NULL;
/* Default the select function */
if (!sel_func) sel_func = (void *)default_vga_hose_select;
/* Find the console VGA device */
for(dev=NULL; (dev=pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, dev));) {
if (!hose)
hose = dev->sysdata;
else
hose = sel_func(hose, dev->sysdata);
}
/* Did we already initialize the correct one? Is there one? */
if (!hose || (conswitchp == &vga_con && pci_vga_hose == hose))
return;
/* Create a new VGA ioport resource WRT the hose it is on. */
alpha_vga.start += hose->io_space->start;
alpha_vga.end += hose->io_space->start;
request_resource(hose->io_space, &alpha_vga);
/* Set the VGA hose and init the new console. */
pci_vga_hose = hose;
take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1);
}
void __init
find_console_vga_hose(void)
{
u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);
if (pu64[7] == 3) { /* TERM_TYPE == graphics */
struct pci_controller *hose;
int h = (pu64[30] >> 24) & 0xff; /* console hose # */
/*
* Our hose numbering DOES match the console's, so find
* the right one...
*/
for (hose = hose_head; hose; hose = hose->next) {
if (hose->index == h) break;
}
if (hose) {
printk("Console graphics on hose %d\n", h);
pci_vga_hose = hose;
}
}
}
#endif
| gpl-2.0 |
pantech-msm8974/android_kernel_pantech_msm8974 | drivers/edac/amd8131_edac.c | 12498 | 10975 | /*
* amd8131_edac.c, AMD8131 hypertransport chip EDAC kernel module
*
* Copyright (c) 2008 Wind River Systems, Inc.
*
* Authors: Cao Qingtao <qingtao.cao@windriver.com>
* Benjamin Walsh <benjamin.walsh@windriver.com>
* Hu Yongqi <yongqi.hu@windriver.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/edac.h>
#include <linux/pci_ids.h>
#include "edac_core.h"
#include "edac_module.h"
#include "amd8131_edac.h"
#define AMD8131_EDAC_REVISION " Ver: 1.0.0"
#define AMD8131_EDAC_MOD_STR "amd8131_edac"
/* Wrapper functions for accessing PCI configuration space */
static void edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
{
int ret;
ret = pci_read_config_dword(dev, reg, val32);
if (ret != 0)
printk(KERN_ERR AMD8131_EDAC_MOD_STR
" PCI Access Read Error at 0x%x\n", reg);
}
static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
{
int ret;
ret = pci_write_config_dword(dev, reg, val32);
if (ret != 0)
printk(KERN_ERR AMD8131_EDAC_MOD_STR
" PCI Access Write Error at 0x%x\n", reg);
}
static char * const bridge_str[] = {
[NORTH_A] = "NORTH A",
[NORTH_B] = "NORTH B",
[SOUTH_A] = "SOUTH A",
[SOUTH_B] = "SOUTH B",
[NO_BRIDGE] = "NO BRIDGE",
};
/* Support up to two AMD8131 chipsets on a platform */
static struct amd8131_dev_info amd8131_devices[] = {
{
.inst = NORTH_A,
.devfn = DEVFN_PCIX_BRIDGE_NORTH_A,
.ctl_name = "AMD8131_PCIX_NORTH_A",
},
{
.inst = NORTH_B,
.devfn = DEVFN_PCIX_BRIDGE_NORTH_B,
.ctl_name = "AMD8131_PCIX_NORTH_B",
},
{
.inst = SOUTH_A,
.devfn = DEVFN_PCIX_BRIDGE_SOUTH_A,
.ctl_name = "AMD8131_PCIX_SOUTH_A",
},
{
.inst = SOUTH_B,
.devfn = DEVFN_PCIX_BRIDGE_SOUTH_B,
.ctl_name = "AMD8131_PCIX_SOUTH_B",
},
{.inst = NO_BRIDGE,},
};
static void amd8131_pcix_init(struct amd8131_dev_info *dev_info)
{
u32 val32;
struct pci_dev *dev = dev_info->dev;
/* First clear error detection flags */
edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
if (val32 & MEM_LIMIT_MASK)
edac_pci_write_dword(dev, REG_MEM_LIM, val32);
/* Clear Discard Timer Timedout flag */
edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
if (val32 & INT_CTLR_DTS)
edac_pci_write_dword(dev, REG_INT_CTLR, val32);
/* Clear CRC Error flag on link side A */
edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
if (val32 & LNK_CTRL_CRCERR_A)
edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
/* Clear CRC Error flag on link side B */
edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
if (val32 & LNK_CTRL_CRCERR_B)
edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
/*
* Then enable all error detections.
*
* Setup Discard Timer Sync Flood Enable,
* System Error Enable and Parity Error Enable.
*/
edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
val32 |= INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE;
edac_pci_write_dword(dev, REG_INT_CTLR, val32);
/* Enable overall SERR Error detection */
edac_pci_read_dword(dev, REG_STS_CMD, &val32);
val32 |= STS_CMD_SERREN;
edac_pci_write_dword(dev, REG_STS_CMD, val32);
/* Setup CRC Flood Enable for link side A */
edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
val32 |= LNK_CTRL_CRCFEN;
edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
/* Setup CRC Flood Enable for link side B */
edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
val32 |= LNK_CTRL_CRCFEN;
edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
}
static void amd8131_pcix_exit(struct amd8131_dev_info *dev_info)
{
u32 val32;
struct pci_dev *dev = dev_info->dev;
/* Disable SERR, PERR and DTSE Error detection */
edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
val32 &= ~(INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE);
edac_pci_write_dword(dev, REG_INT_CTLR, val32);
/* Disable overall System Error detection */
edac_pci_read_dword(dev, REG_STS_CMD, &val32);
val32 &= ~STS_CMD_SERREN;
edac_pci_write_dword(dev, REG_STS_CMD, val32);
/* Disable CRC Sync Flood on link side A */
edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
val32 &= ~LNK_CTRL_CRCFEN;
edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
/* Disable CRC Sync Flood on link side B */
edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
val32 &= ~LNK_CTRL_CRCFEN;
edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
}
static void amd8131_pcix_check(struct edac_pci_ctl_info *edac_dev)
{
struct amd8131_dev_info *dev_info = edac_dev->pvt_info;
struct pci_dev *dev = dev_info->dev;
u32 val32;
/* Check PCI-X Bridge Memory Base-Limit Register for errors */
edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
if (val32 & MEM_LIMIT_MASK) {
printk(KERN_INFO "Error(s) in mem limit register "
"on %s bridge\n", dev_info->ctl_name);
printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
"RTA: %d, STA: %d, MDPE: %d\n",
val32 & MEM_LIMIT_DPE,
val32 & MEM_LIMIT_RSE,
val32 & MEM_LIMIT_RMA,
val32 & MEM_LIMIT_RTA,
val32 & MEM_LIMIT_STA,
val32 & MEM_LIMIT_MDPE);
val32 |= MEM_LIMIT_MASK;
edac_pci_write_dword(dev, REG_MEM_LIM, val32);
edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
}
/* Check if Discard Timer timed out */
edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
if (val32 & INT_CTLR_DTS) {
printk(KERN_INFO "Error(s) in interrupt and control register "
"on %s bridge\n", dev_info->ctl_name);
printk(KERN_INFO "DTS: %d\n", val32 & INT_CTLR_DTS);
val32 |= INT_CTLR_DTS;
edac_pci_write_dword(dev, REG_INT_CTLR, val32);
edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
}
/* Check if CRC error happens on link side A */
edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
if (val32 & LNK_CTRL_CRCERR_A) {
printk(KERN_INFO "Error(s) in link conf and control register "
"on %s bridge\n", dev_info->ctl_name);
printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_A);
val32 |= LNK_CTRL_CRCERR_A;
edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
}
/* Check if CRC error happens on link side B */
edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
if (val32 & LNK_CTRL_CRCERR_B) {
printk(KERN_INFO "Error(s) in link conf and control register "
"on %s bridge\n", dev_info->ctl_name);
printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_B);
val32 |= LNK_CTRL_CRCERR_B;
edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
}
}
static struct amd8131_info amd8131_chipset = {
.err_dev = PCI_DEVICE_ID_AMD_8131_APIC,
.devices = amd8131_devices,
.init = amd8131_pcix_init,
.exit = amd8131_pcix_exit,
.check = amd8131_pcix_check,
};
/*
* There are 4 PCIX Bridges on ATCA-6101 that share the same PCI Device ID,
* so amd8131_probe() is called by the kernel 4 times, each time with a
* different pci_dev address.
*/
static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct amd8131_dev_info *dev_info;
for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
dev_info++)
if (dev_info->devfn == dev->devfn)
break;
if (dev_info->inst == NO_BRIDGE) /* should never happen */
return -ENODEV;
/*
* We can't use pci_get_device() as we normally would, because there
* are 4 devices sharing the same ID; take a reference with pci_dev_get() instead.
*/
dev_info->dev = pci_dev_get(dev);
if (pci_enable_device(dev_info->dev)) {
pci_dev_put(dev_info->dev);
printk(KERN_ERR "failed to enable:"
"vendor %x, device %x, devfn %x, name %s\n",
PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
dev_info->devfn, dev_info->ctl_name);
return -ENODEV;
}
/*
* we do not allocate extra private structure for
* edac_pci_ctl_info, but make use of existing
* one instead.
*/
dev_info->edac_idx = edac_pci_alloc_index();
dev_info->edac_dev = edac_pci_alloc_ctl_info(0, dev_info->ctl_name);
if (!dev_info->edac_dev)
return -ENOMEM;
dev_info->edac_dev->pvt_info = dev_info;
dev_info->edac_dev->dev = &dev_info->dev->dev;
dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
dev_info->edac_dev->ctl_name = dev_info->ctl_name;
dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
if (edac_op_state == EDAC_OPSTATE_POLL)
dev_info->edac_dev->edac_check = amd8131_chipset.check;
if (amd8131_chipset.init)
amd8131_chipset.init(dev_info);
if (edac_pci_add_device(dev_info->edac_dev, dev_info->edac_idx) > 0) {
printk(KERN_ERR "failed edac_pci_add_device() for %s\n",
dev_info->ctl_name);
edac_pci_free_ctl_info(dev_info->edac_dev);
return -ENODEV;
}
printk(KERN_INFO "added one device on AMD8131 "
"vendor %x, device %x, devfn %x, name %s\n",
PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
dev_info->devfn, dev_info->ctl_name);
return 0;
}
static void amd8131_remove(struct pci_dev *dev)
{
struct amd8131_dev_info *dev_info;
for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
dev_info++)
if (dev_info->devfn == dev->devfn)
break;
if (dev_info->inst == NO_BRIDGE) /* should never happen */
return;
if (dev_info->edac_dev) {
edac_pci_del_device(dev_info->edac_dev->dev);
edac_pci_free_ctl_info(dev_info->edac_dev);
}
if (amd8131_chipset.exit)
amd8131_chipset.exit(dev_info);
pci_dev_put(dev_info->dev);
}
static const struct pci_device_id amd8131_edac_pci_tbl[] = {
{
PCI_VEND_DEV(AMD, 8131_BRIDGE),
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = 0,
.class_mask = 0,
.driver_data = 0,
},
{
0,
} /* table is NULL-terminated */
};
MODULE_DEVICE_TABLE(pci, amd8131_edac_pci_tbl);
static struct pci_driver amd8131_edac_driver = {
.name = AMD8131_EDAC_MOD_STR,
.probe = amd8131_probe,
.remove = amd8131_remove,
.id_table = amd8131_edac_pci_tbl,
};
static int __init amd8131_edac_init(void)
{
printk(KERN_INFO "AMD8131 EDAC driver " AMD8131_EDAC_REVISION "\n");
printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
/* Only POLL mode supported so far */
edac_op_state = EDAC_OPSTATE_POLL;
return pci_register_driver(&amd8131_edac_driver);
}
static void __exit amd8131_edac_exit(void)
{
pci_unregister_driver(&amd8131_edac_driver);
}
module_init(amd8131_edac_init);
module_exit(amd8131_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module");
| gpl-2.0 |
linusw/linux-bfq | drivers/usb/host/ehci-sched.c | 211 | 66295 | /*
* Copyright (c) 2001-2004 by David Brownell
* Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
/*-------------------------------------------------------------------------*/
/*
* EHCI scheduled transaction support: interrupt, iso, split iso
* These are called "periodic" transactions in the EHCI spec.
*
* Note that for interrupt transfers, the QH/QTD manipulation is shared
* with the "asynchronous" transaction support (control/bulk transfers).
* The only real difference is in how interrupt transfers are scheduled.
*
* For ISO, we make an "iso_stream" head to serve the same role as a QH.
* It keeps track of every ITD (or SITD) that's linked, and holds enough
* pre-calculated schedule data to make appending to the queue be quick.
*/
static int ehci_get_frame(struct usb_hcd *hcd);
/*
* periodic_next_shadow - return "next" pointer on shadow list
* @periodic: host pointer to qh/itd/sitd
* @tag: hardware tag for type of this record
*/
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
__hc32 tag)
{
switch (hc32_to_cpu(ehci, tag)) {
case Q_TYPE_QH:
return &periodic->qh->qh_next;
case Q_TYPE_FSTN:
return &periodic->fstn->fstn_next;
case Q_TYPE_ITD:
return &periodic->itd->itd_next;
/* case Q_TYPE_SITD: */
default:
return &periodic->sitd->sitd_next;
}
}
static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
__hc32 tag)
{
switch (hc32_to_cpu(ehci, tag)) {
/* our ehci_shadow.qh is actually software part */
case Q_TYPE_QH:
return &periodic->qh->hw->hw_next;
/* others are hw parts */
default:
return periodic->hw_next;
}
}
/* caller must hold ehci->lock */
static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
union ehci_shadow *prev_p = &ehci->pshadow[frame];
__hc32 *hw_p = &ehci->periodic[frame];
union ehci_shadow here = *prev_p;
/* find predecessor of "ptr"; hw and shadow lists are in sync */
while (here.ptr && here.ptr != ptr) {
prev_p = periodic_next_shadow(ehci, prev_p,
Q_NEXT_TYPE(ehci, *hw_p));
hw_p = shadow_next_periodic(ehci, &here,
Q_NEXT_TYPE(ehci, *hw_p));
here = *prev_p;
}
/* an interrupt entry (at list end) could have been shared */
if (!here.ptr)
return;
/* update shadow and hardware lists ... the old "next" pointers
* from ptr may still be in use, the caller updates them.
*/
*prev_p = *periodic_next_shadow(ehci, &here,
Q_NEXT_TYPE(ehci, *hw_p));
if (!ehci->use_dummy_qh ||
*shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
!= EHCI_LIST_END(ehci))
*hw_p = *shadow_next_periodic(ehci, &here,
Q_NEXT_TYPE(ehci, *hw_p));
else
*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}
/*-------------------------------------------------------------------------*/
/* Bandwidth and TT management */
/* Find the TT data structure for this device; create it if necessary */
static struct ehci_tt *find_tt(struct usb_device *udev)
{
struct usb_tt *utt = udev->tt;
struct ehci_tt *tt, **tt_index, **ptt;
unsigned port;
bool allocated_index = false;
if (!utt)
return NULL; /* Not below a TT */
/*
* Find/create our data structure.
* For hubs with a single TT, we get it directly.
* For hubs with multiple TTs, there's an extra level of pointers.
*/
tt_index = NULL;
if (utt->multi) {
tt_index = utt->hcpriv;
if (!tt_index) { /* Create the index array */
tt_index = kzalloc(utt->hub->maxchild *
sizeof(*tt_index), GFP_ATOMIC);
if (!tt_index)
return ERR_PTR(-ENOMEM);
utt->hcpriv = tt_index;
allocated_index = true;
}
port = udev->ttport - 1;
ptt = &tt_index[port];
} else {
port = 0;
ptt = (struct ehci_tt **) &utt->hcpriv;
}
tt = *ptt;
if (!tt) { /* Create the ehci_tt */
struct ehci_hcd *ehci =
hcd_to_ehci(bus_to_hcd(udev->bus));
tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
if (!tt) {
if (allocated_index) {
utt->hcpriv = NULL;
kfree(tt_index);
}
return ERR_PTR(-ENOMEM);
}
list_add_tail(&tt->tt_list, &ehci->tt_list);
INIT_LIST_HEAD(&tt->ps_list);
tt->usb_tt = utt;
tt->tt_port = port;
*ptt = tt;
}
return tt;
}
/* Release the TT above udev, if it's not in use */
static void drop_tt(struct usb_device *udev)
{
struct usb_tt *utt = udev->tt;
struct ehci_tt *tt, **tt_index, **ptt;
int cnt, i;
if (!utt || !utt->hcpriv)
return; /* Not below a TT, or never allocated */
cnt = 0;
if (utt->multi) {
tt_index = utt->hcpriv;
ptt = &tt_index[udev->ttport - 1];
/* How many entries are left in tt_index? */
for (i = 0; i < utt->hub->maxchild; ++i)
cnt += !!tt_index[i];
} else {
tt_index = NULL;
ptt = (struct ehci_tt **) &utt->hcpriv;
}
tt = *ptt;
if (!tt || !list_empty(&tt->ps_list))
return; /* never allocated, or still in use */
list_del(&tt->tt_list);
*ptt = NULL;
kfree(tt);
if (cnt == 1) {
utt->hcpriv = NULL;
kfree(tt_index);
}
}
static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
struct ehci_per_sched *ps)
{
dev_dbg(&ps->udev->dev,
"ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
ps->ep->desc.bEndpointAddress,
(sign >= 0 ? "reserve" : "release"), type,
(ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
ps->phase, ps->phase_uf, ps->period,
ps->usecs, ps->c_usecs, ps->cs_mask);
}
static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
struct ehci_qh *qh, int sign)
{
unsigned start_uf;
unsigned i, j, m;
int usecs = qh->ps.usecs;
int c_usecs = qh->ps.c_usecs;
int tt_usecs = qh->ps.tt_usecs;
struct ehci_tt *tt;
if (qh->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
return;
start_uf = qh->ps.bw_phase << 3;
bandwidth_dbg(ehci, sign, "intr", &qh->ps);
if (sign < 0) { /* Release bandwidth */
usecs = -usecs;
c_usecs = -c_usecs;
tt_usecs = -tt_usecs;
}
/* Entire transaction (high speed) or start-split (full/low speed) */
for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
i += qh->ps.bw_uperiod)
ehci->bandwidth[i] += usecs;
/* Complete-split (full/low speed) */
if (qh->ps.c_usecs) {
/* NOTE: adjustments needed for FSTN */
for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
i += qh->ps.bw_uperiod) {
for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
if (qh->ps.cs_mask & m)
ehci->bandwidth[i+j] += c_usecs;
}
}
}
/* FS/LS bus bandwidth */
if (tt_usecs) {
tt = find_tt(qh->ps.udev);
if (sign > 0)
list_add_tail(&qh->ps.ps_list, &tt->ps_list);
else
list_del(&qh->ps.ps_list);
for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
i += qh->ps.bw_period)
tt->bandwidth[i] += tt_usecs;
}
}
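/*
* Illustration (assumed numbers): for an interrupt endpoint with
* ps.bw_phase = 1, ps.phase_uf = 2 and ps.bw_uperiod = 16, the loop above
* starts at microframe (1 << 3) + 2 = 10 and charges ps.usecs to
* ehci->bandwidth[10], [26], [42], ... across the table, i.e. once every
* two frames.
*/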
/*-------------------------------------------------------------------------*/
static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
struct ehci_tt *tt)
{
struct ehci_per_sched *ps;
unsigned uframe, uf, x;
u8 *budget_line;
if (!tt)
return;
memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);
/* Add up the contributions from all the endpoints using this TT */
list_for_each_entry(ps, &tt->ps_list, ps_list) {
for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
uframe += ps->bw_uperiod) {
budget_line = &budget_table[uframe];
x = ps->tt_usecs;
/* propagate the time forward */
for (uf = ps->phase_uf; uf < 8; ++uf) {
x += budget_line[uf];
/* Each microframe lasts 125 us */
if (x <= 125) {
budget_line[uf] = x;
break;
}
budget_line[uf] = 125;
x -= 125;
}
}
}
}
static int __maybe_unused same_tt(struct usb_device *dev1,
struct usb_device *dev2)
{
if (!dev1->tt || !dev2->tt)
return 0;
if (dev1->tt != dev2->tt)
return 0;
if (dev1->tt->multi)
return dev1->ttport == dev2->ttport;
else
return 1;
}
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
/* Which uframe does the low/fullspeed transfer start in?
*
* The parameter is the mask of ssplits in "H-frame" terms
* and this returns the transfer start uframe in "B-frame" terms,
* which allows both to match, e.g. a ssplit in "H-frame" uframe 0
* will cause a transfer in "B-frame" uframe 0. "B-frames" lag
* "H-frames" by 1 uframe. See the EHCI spec sec 4.5 and figure 4.7.
*/
static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
{
unsigned char smask = hc32_to_cpu(ehci, mask) & QH_SMASK;
if (!smask) {
ehci_err(ehci, "invalid empty smask!\n");
/* uframe 7 can't have bw so this will indicate failure */
return 7;
}
return ffs(smask) - 1;
}
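/*
* Example: an smask of 0x04 marks the start-split in H-frame uframe 2, so
* ffs(0x04) - 1 = 2 is returned and the full/low-speed transfer begins in
* B-frame uframe 2.
*/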
static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
int i;
for (i = 0; i < 7; i++) {
if (max_tt_usecs[i] < tt_usecs[i]) {
tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
tt_usecs[i] = max_tt_usecs[i];
}
}
}
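/*
* Illustration (assumed input): with tt_usecs = {150, 30, 0, ...} the loop
* above clips uframe 0 to its 125 us budget and pushes the 25 us excess
* forward, giving {125, 55, 0, ...}; per max_tt_usecs[], uframe 6 can only
* absorb 30 us and uframe 7 none.
*/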
/*
* Return true if the device's tt's downstream bus is available for a
* periodic transfer of the specified length (usecs), starting at the
* specified frame/uframe. Note that (as summarized in section 11.19
* of the usb 2.0 spec) TTs can buffer multiple transactions for each
* uframe.
*
* The uframe parameter is when the fullspeed/lowspeed transfer
* should be executed in "B-frame" terms, which is the same as the
* highspeed ssplit's uframe (which is in "H-frame" terms). For example
* a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
* See the EHCI spec sec 4.5 and fig 4.7.
*
* This checks if the full/lowspeed bus, at the specified starting uframe,
* has the specified bandwidth available, according to rules listed
* in USB 2.0 spec section 11.18.1 fig 11-60.
*
* This does not check if the transfer would exceed the max ssplit
* limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
* since proper scheduling limits ssplits to less than 16 per uframe.
*/
static int tt_available(
struct ehci_hcd *ehci,
struct ehci_per_sched *ps,
struct ehci_tt *tt,
unsigned frame,
unsigned uframe
)
{
unsigned period = ps->bw_period;
unsigned usecs = ps->tt_usecs;
if ((period == 0) || (uframe >= 7)) /* error */
return 0;
for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
frame += period) {
unsigned i, uf;
unsigned short tt_usecs[8];
if (tt->bandwidth[frame] + usecs > 900)
return 0;
uf = frame << 3;
for (i = 0; i < 8; (++i, ++uf))
tt_usecs[i] = ehci->tt_budget[uf];
if (max_tt_usecs[uframe] <= tt_usecs[uframe])
return 0;
/* special case for isoc transfers larger than 125us:
* the first and each subsequent fully used uframe
* must be empty, so as to not illegally delay
* already scheduled transactions
*/
if (usecs > 125) {
int ufs = (usecs / 125);
for (i = uframe; i < (uframe + ufs) && i < 8; i++)
if (tt_usecs[i] > 0)
return 0;
}
tt_usecs[uframe] += usecs;
carryover_tt_bandwidth(tt_usecs);
/* fail if the carryover pushed bw past the last uframe's limit */
if (max_tt_usecs[7] < tt_usecs[7])
return 0;
}
return 1;
}
#else
/* return true iff the device's transaction translator is available
* for a periodic transfer starting at the specified frame, using
* all the uframes in the mask.
*/
static int tt_no_collision(
struct ehci_hcd *ehci,
unsigned period,
struct usb_device *dev,
unsigned frame,
u32 uf_mask
)
{
if (period == 0) /* error */
return 0;
/* note bandwidth wastage: split never follows csplit
* (different dev or endpoint) until the next uframe.
* calling convention doesn't make that distinction.
*/
for (; frame < ehci->periodic_size; frame += period) {
union ehci_shadow here;
__hc32 type;
struct ehci_qh_hw *hw;
here = ehci->pshadow[frame];
type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
while (here.ptr) {
switch (hc32_to_cpu(ehci, type)) {
case Q_TYPE_ITD:
type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
here = here.itd->itd_next;
continue;
case Q_TYPE_QH:
hw = here.qh->hw;
if (same_tt(dev, here.qh->ps.udev)) {
u32 mask;
mask = hc32_to_cpu(ehci,
hw->hw_info2);
/* "knows" no gap is needed */
mask |= mask >> 8;
if (mask & uf_mask)
break;
}
type = Q_NEXT_TYPE(ehci, hw->hw_next);
here = here.qh->qh_next;
continue;
case Q_TYPE_SITD:
if (same_tt(dev, here.sitd->urb->dev)) {
u16 mask;
mask = hc32_to_cpu(ehci, here.sitd
->hw_uframe);
/* FIXME assumes no gap for IN! */
mask |= mask >> 8;
if (mask & uf_mask)
break;
}
type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
here = here.sitd->sitd_next;
continue;
/* case Q_TYPE_FSTN: */
default:
ehci_dbg(ehci,
"periodic frame %d bogus type %d\n",
frame, type);
}
/* collision or error */
return 0;
}
}
/* no collision */
return 1;
}
#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */
/*-------------------------------------------------------------------------*/
static void enable_periodic(struct ehci_hcd *ehci)
{
if (ehci->periodic_count++)
return;
/* Stop waiting to turn off the periodic schedule */
ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);
/* Don't start the schedule until PSS is 0 */
ehci_poll_PSS(ehci);
turn_on_io_watchdog(ehci);
}
static void disable_periodic(struct ehci_hcd *ehci)
{
if (--ehci->periodic_count)
return;
/* Don't turn off the schedule until PSS is 1 */
ehci_poll_PSS(ehci);
}
/*-------------------------------------------------------------------------*/
/* periodic schedule slots have iso tds (normal or split) first, then a
* sparse tree for active interrupt transfers.
*
* this just links in a qh; caller guarantees uframe masks are set right.
* no FSTN support (yet; ehci 0.96+)
*/
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned i;
unsigned period = qh->ps.period;
dev_dbg(&qh->ps.udev->dev,
"link qh%d-%04x/%p start %d [%d/%d us]\n",
period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
& (QH_CMASK | QH_SMASK),
qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
/* high bandwidth, or otherwise every microframe */
if (period == 0)
period = 1;
for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
union ehci_shadow *prev = &ehci->pshadow[i];
__hc32 *hw_p = &ehci->periodic[i];
union ehci_shadow here = *prev;
__hc32 type = 0;
/* skip the iso nodes at list head */
while (here.ptr) {
type = Q_NEXT_TYPE(ehci, *hw_p);
if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
break;
prev = periodic_next_shadow(ehci, prev, type);
hw_p = shadow_next_periodic(ehci, &here, type);
here = *prev;
}
/* sorting each branch by period (slow-->fast)
* enables sharing interior tree nodes
*/
while (here.ptr && qh != here.qh) {
if (qh->ps.period > here.qh->ps.period)
break;
prev = &here.qh->qh_next;
hw_p = &here.qh->hw->hw_next;
here = *prev;
}
/* link in this qh, unless some earlier pass did that */
if (qh != here.qh) {
qh->qh_next = here;
if (here.qh)
qh->hw->hw_next = *hw_p;
wmb();
prev->qh = qh;
*hw_p = QH_NEXT(ehci, qh->qh_dma);
}
}
qh->qh_state = QH_STATE_LINKED;
qh->xacterrs = 0;
qh->unlink_reason = 0;
/* update per-qh bandwidth for debugfs */
ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
: (qh->ps.usecs * 8);
list_add(&qh->intr_node, &ehci->intr_qh_list);
/* maybe enable periodic schedule processing */
++ehci->intr_count;
enable_periodic(ehci);
}
static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned i;
unsigned period;
/*
* If qh is for a low/full-speed device, simply unlinking it
* could interfere with an ongoing split transaction. To unlink
* it safely would require setting the QH_INACTIVATE bit and
* waiting at least one frame, as described in EHCI 4.12.2.5.
*
* We won't bother with any of this. Instead, we assume that the
* only reason for unlinking an interrupt QH while the current URB
* is still active is to dequeue all the URBs (flush the whole
* endpoint queue).
*
* If rebalancing the periodic schedule is ever implemented, this
* approach will no longer be valid.
*/
/* high bandwidth, or otherwise part of every microframe */
period = qh->ps.period ? : 1;
for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
periodic_unlink(ehci, i, qh);
/* update per-qh bandwidth for debugfs */
ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
: (qh->ps.usecs * 8);
dev_dbg(&qh->ps.udev->dev,
"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
qh->ps.period,
hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
/* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = NULL;
if (ehci->qh_scan_next == qh)
ehci->qh_scan_next = list_entry(qh->intr_node.next,
struct ehci_qh, intr_node);
list_del(&qh->intr_node);
}
static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
if (qh->qh_state != QH_STATE_LINKED ||
list_empty(&qh->unlink_node))
return;
list_del_init(&qh->unlink_node);
/*
* TODO: disable the event of EHCI_HRTIMER_START_UNLINK_INTR for
* avoiding unnecessary CPU wakeup
*/
}
static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
/* If the QH isn't linked then there's nothing we can do. */
if (qh->qh_state != QH_STATE_LINKED)
return;
/* if the qh is waiting for unlink, cancel it now */
cancel_unlink_wait_intr(ehci, qh);
qh_unlink_periodic(ehci, qh);
/* Make sure the unlinks are visible before starting the timer */
wmb();
/*
* The EHCI spec doesn't say how long it takes the controller to
* stop accessing an unlinked interrupt QH. The timer delay is
* 9 uframes; presumably that will be long enough.
*/
qh->unlink_cycle = ehci->intr_unlink_cycle;
/* New entries go at the end of the intr_unlink list */
list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
if (ehci->intr_unlinking)
; /* Avoid recursive calls */
else if (ehci->rh_state < EHCI_RH_RUNNING)
ehci_handle_intr_unlinks(ehci);
else if (ehci->intr_unlink.next == &qh->unlink_node) {
ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
++ehci->intr_unlink_cycle;
}
}
/*
* It is common only one intr URB is scheduled on one qh, and
* given complete() is run in tasklet context, introduce a bit
* delay to avoid unlink qh too early.
*/
static void start_unlink_intr_wait(struct ehci_hcd *ehci,
struct ehci_qh *qh)
{
qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
/* New entries go at the end of the intr_unlink_wait list */
list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
if (ehci->rh_state < EHCI_RH_RUNNING)
ehci_handle_start_intr_unlinks(ehci);
else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
++ehci->intr_unlink_wait_cycle;
}
}
static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qh_hw *hw = qh->hw;
int rc;
qh->qh_state = QH_STATE_IDLE;
hw->hw_next = EHCI_LIST_END(ehci);
if (!list_empty(&qh->qtd_list))
qh_completions(ehci, qh);
/* reschedule QH iff another request is queued */
if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
rc = qh_schedule(ehci, qh);
if (rc == 0) {
qh_refresh(ehci, qh);
qh_link_periodic(ehci, qh);
}
/* An error here likely indicates handshake failure
* or no space left in the schedule. Neither fault
* should happen often ...
*
* FIXME kill the now-dysfunctional queued urbs
*/
else {
ehci_err(ehci, "can't reschedule qh %p, err %d\n",
qh, rc);
}
}
/* maybe turn off periodic schedule */
--ehci->intr_count;
disable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/
static int check_period(
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
unsigned uperiod,
unsigned usecs
) {
/* complete split running into next frame?
* given FSTN support, we could sometimes check...
*/
if (uframe >= 8)
return 0;
/* convert "usecs we need" to "max already claimed" */
usecs = ehci->uframe_periodic_max - usecs;
for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
uframe += uperiod) {
if (ehci->bandwidth[uframe] > usecs)
return 0;
}
/* success! */
return 1;
}
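/*
 * Editorial illustration of the inversion used in check_period(): rather
 * than testing "claimed + needed <= max" for every uframe, the driver
 * precomputes "max - needed" once and compares each claimed value against
 * that bound. Hypothetical standalone helper, shown only for clarity.
 */
static inline int example_uframe_fits(unsigned claimed_usecs,
				      unsigned needed_usecs,
				      unsigned uframe_periodic_max)
{
	if (needed_usecs > uframe_periodic_max)
		return 0;			/* can never fit */
	return claimed_usecs <= uframe_periodic_max - needed_usecs;
}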
static int check_intr_schedule(
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
struct ehci_qh *qh,
unsigned *c_maskp,
struct ehci_tt *tt
)
{
int retval = -ENOSPC;
u8 mask = 0;
if (qh->ps.c_usecs && uframe >= 6) /* FSTN territory? */
goto done;
if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
goto done;
if (!qh->ps.c_usecs) {
retval = 0;
*c_maskp = 0;
goto done;
}
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
unsigned i;
/* TODO : this may need FSTN for SSPLIT in uframe 5. */
for (i = uframe+2; i < 8 && i <= uframe+4; i++)
if (!check_period(ehci, frame, i,
qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
else
mask |= 1 << i;
retval = 0;
*c_maskp = mask;
}
#else
/* Make sure this tt's buffer is also available for CSPLITs.
* We pessimize a bit; probably the typical full speed case
* doesn't need the second CSPLIT.
*
* NOTE: both SPLIT and CSPLIT could be checked in just
* one smart pass...
*/
mask = 0x03 << (uframe + qh->gap_uf);
*c_maskp = mask;
mask |= 1 << uframe;
if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
if (!check_period(ehci, frame, uframe + qh->gap_uf,
qh->ps.bw_uperiod, qh->ps.c_usecs))
goto done;
retval = 0;
}
#endif
done:
return retval;
}
/* "first fit" scheduling policy used the first time through,
* or when the previous schedule slot can't be re-used.
*/
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
int status = 0;
unsigned uframe;
unsigned c_mask;
struct ehci_qh_hw *hw = qh->hw;
struct ehci_tt *tt;
hw->hw_next = EHCI_LIST_END(ehci);
/* reuse the previous schedule slots, if we can */
if (qh->ps.phase != NO_FRAME) {
ehci_dbg(ehci, "reused qh %p schedule\n", qh);
return 0;
}
uframe = 0;
c_mask = 0;
tt = find_tt(qh->ps.udev);
if (IS_ERR(tt)) {
status = PTR_ERR(tt);
goto done;
}
compute_tt_budget(ehci->tt_budget, tt);
/* else scan the schedule to find a group of slots such that all
* uframes have enough periodic bandwidth available.
*/
/* "normal" case, uframing flexible except with splits */
if (qh->ps.bw_period) {
int i;
unsigned frame;
for (i = qh->ps.bw_period; i > 0; --i) {
frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
for (uframe = 0; uframe < 8; uframe++) {
status = check_intr_schedule(ehci,
frame, uframe, qh, &c_mask, tt);
if (status == 0)
goto got_it;
}
}
/* qh->ps.bw_period == 0 means every uframe */
} else {
status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
}
if (status)
goto done;
got_it:
qh->ps.phase = (qh->ps.period ? ehci->random_frame &
(qh->ps.period - 1) : 0);
qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
qh->ps.phase_uf = uframe;
qh->ps.cs_mask = qh->ps.period ?
(c_mask << 8) | (1 << uframe) :
QH_SMASK;
/* reset S-frame and (maybe) C-frame masks */
hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
reserve_release_intr_bandwidth(ehci, qh, 1);
done:
return status;
}
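/*
 * Editorial sketch of the split-transaction mask layout that qh_schedule()
 * writes into hw_info2: bits 0-7 form the S-mask (start-split uframes) and
 * bits 8-15 form the C-mask (complete-split uframes). The helper below is
 * hypothetical and only makes the bit packing explicit.
 */
static inline u16 example_build_cs_mask(unsigned start_uframe, u8 c_mask)
{
	return ((u16)c_mask << 8) | (1 << (start_uframe & 7));
}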
static int intr_submit(
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
gfp_t mem_flags
) {
unsigned epnum;
unsigned long flags;
struct ehci_qh *qh;
int status;
struct list_head empty;
/* get endpoint and transfer/schedule data */
epnum = urb->ep->desc.bEndpointAddress;
spin_lock_irqsave(&ehci->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
if (unlikely(status))
goto done_not_linked;
/* get qh and force any scheduling errors */
INIT_LIST_HEAD(&empty);
qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
if (qh == NULL) {
status = -ENOMEM;
goto done;
}
if (qh->qh_state == QH_STATE_IDLE) {
status = qh_schedule(ehci, qh);
if (status)
goto done;
}
/* then queue the urb's tds to the qh */
qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
BUG_ON(qh == NULL);
/* stuff into the periodic schedule */
if (qh->qh_state == QH_STATE_IDLE) {
qh_refresh(ehci, qh);
qh_link_periodic(ehci, qh);
} else {
/* cancel unlink wait for the qh */
cancel_unlink_wait_intr(ehci, qh);
}
/* ... update usbfs periodic stats */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
done:
if (unlikely(status))
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
spin_unlock_irqrestore(&ehci->lock, flags);
if (status)
qtd_list_free(ehci, urb, qtd_list);
return status;
}
static void scan_intr(struct ehci_hcd *ehci)
{
struct ehci_qh *qh;
list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
intr_node) {
/* clean any finished work for this qh */
if (!list_empty(&qh->qtd_list)) {
int temp;
/*
* Unlinks could happen here; completion reporting
* drops the lock. That's why ehci->qh_scan_next
* always holds the next qh to scan; if the next qh
* gets unlinked then ehci->qh_scan_next is adjusted
* in qh_unlink_periodic().
*/
temp = qh_completions(ehci, qh);
if (unlikely(temp))
start_unlink_intr(ehci, qh);
else if (unlikely(list_empty(&qh->qtd_list) &&
qh->qh_state == QH_STATE_LINKED))
start_unlink_intr_wait(ehci, qh);
}
}
}
/*-------------------------------------------------------------------------*/
/* ehci_iso_stream ops work with both ITD and SITD */
static struct ehci_iso_stream *
iso_stream_alloc(gfp_t mem_flags)
{
struct ehci_iso_stream *stream;
stream = kzalloc(sizeof(*stream), mem_flags);
if (likely(stream != NULL)) {
INIT_LIST_HEAD(&stream->td_list);
INIT_LIST_HEAD(&stream->free_list);
stream->next_uframe = NO_FRAME;
stream->ps.phase = NO_FRAME;
}
return stream;
}
static void
iso_stream_init(
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
struct urb *urb
)
{
static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
struct usb_device *dev = urb->dev;
u32 buf1;
unsigned epnum, maxp;
int is_input;
unsigned tmp;
/*
* this might be a "high bandwidth" highspeed endpoint,
* as encoded in the ep descriptor's wMaxPacket field
*/
epnum = usb_pipeendpoint(urb->pipe);
is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
maxp = usb_endpoint_maxp(&urb->ep->desc);
buf1 = is_input ? 1 << 11 : 0;
/* knows about ITD vs SITD */
if (dev->speed == USB_SPEED_HIGH) {
unsigned multi = hb_mult(maxp);
stream->highspeed = 1;
maxp = max_packet(maxp);
buf1 |= maxp;
maxp *= multi;
stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
stream->buf1 = cpu_to_hc32(ehci, buf1);
stream->buf2 = cpu_to_hc32(ehci, multi);
/* usbfs wants to report the average usecs per frame tied up
* when transfers on this endpoint are scheduled ...
*/
stream->ps.usecs = HS_USECS_ISO(maxp);
/* period for bandwidth allocation */
tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
1 << (urb->ep->desc.bInterval - 1));
/* Allow urb->interval to override */
stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
stream->uperiod = urb->interval;
stream->ps.period = urb->interval >> 3;
stream->bandwidth = stream->ps.usecs * 8 /
stream->ps.bw_uperiod;
} else {
u32 addr;
int think_time;
int hs_transfers;
addr = dev->ttport << 24;
if (!ehci_is_TDI(ehci)
|| (dev->tt->hub !=
ehci_to_hcd(ehci)->self.root_hub))
addr |= dev->tt->hub->devnum << 16;
addr |= epnum << 8;
addr |= dev->devnum;
stream->ps.usecs = HS_USECS_ISO(maxp);
think_time = dev->tt ? dev->tt->think_time : 0;
stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
dev->speed, is_input, 1, maxp));
hs_transfers = max(1u, (maxp + 187) / 188);
if (is_input) {
u32 tmp;
addr |= 1 << 31;
stream->ps.c_usecs = stream->ps.usecs;
stream->ps.usecs = HS_USECS_ISO(1);
stream->ps.cs_mask = 1;
/* c-mask as specified in USB 2.0 11.18.4 3.c */
tmp = (1 << (hs_transfers + 2)) - 1;
stream->ps.cs_mask |= tmp << (8 + 2);
} else
stream->ps.cs_mask = smask_out[hs_transfers - 1];
/* period for bandwidth allocation */
tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
1 << (urb->ep->desc.bInterval - 1));
/* Allow urb->interval to override */
stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
stream->ps.bw_uperiod = stream->ps.bw_period << 3;
stream->ps.period = urb->interval;
stream->uperiod = urb->interval << 3;
stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
stream->ps.bw_period;
/* stream->splits gets created from cs_mask later */
stream->address = cpu_to_hc32(ehci, addr);
}
stream->ps.udev = dev;
stream->ps.ep = urb->ep;
stream->bEndpointAddress = is_input | epnum;
stream->maxp = maxp;
}
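/*
 * Editorial note on the high-speed branch of iso_stream_init(): a
 * high-bandwidth wMaxPacketSize packs the packet size in bits 10..0 and
 * "additional transactions per microframe" in bits 12..11 (USB 2.0
 * encoding, mirrored by max_packet()/hb_mult()). Hypothetical helper,
 * shown only to make the decode explicit.
 */
static inline unsigned example_iso_bytes_per_uframe(u16 wMaxPacketSize)
{
	unsigned size = wMaxPacketSize & 0x7ff;			/* base packet size */
	unsigned mult = 1 + ((wMaxPacketSize >> 11) & 0x3);	/* 1..3 per uframe */
	return size * mult;
}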
static struct ehci_iso_stream *
iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
{
unsigned epnum;
struct ehci_iso_stream *stream;
struct usb_host_endpoint *ep;
unsigned long flags;
epnum = usb_pipeendpoint (urb->pipe);
if (usb_pipein(urb->pipe))
ep = urb->dev->ep_in[epnum];
else
ep = urb->dev->ep_out[epnum];
spin_lock_irqsave(&ehci->lock, flags);
stream = ep->hcpriv;
if (unlikely(stream == NULL)) {
stream = iso_stream_alloc(GFP_ATOMIC);
if (likely(stream != NULL)) {
ep->hcpriv = stream;
iso_stream_init(ehci, stream, urb);
}
/* if dev->ep [epnum] is a QH, hw is set */
} else if (unlikely(stream->hw != NULL)) {
ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
urb->dev->devpath, epnum,
usb_pipein(urb->pipe) ? "in" : "out");
stream = NULL;
}
spin_unlock_irqrestore(&ehci->lock, flags);
return stream;
}
/*-------------------------------------------------------------------------*/
/* ehci_iso_sched ops can be ITD-only or SITD-only */
static struct ehci_iso_sched *
iso_sched_alloc(unsigned packets, gfp_t mem_flags)
{
struct ehci_iso_sched *iso_sched;
int size = sizeof(*iso_sched);
size += packets * sizeof(struct ehci_iso_packet);
iso_sched = kzalloc(size, mem_flags);
if (likely(iso_sched != NULL))
INIT_LIST_HEAD(&iso_sched->td_list);
return iso_sched;
}
static inline void
itd_sched_init(
struct ehci_hcd *ehci,
struct ehci_iso_sched *iso_sched,
struct ehci_iso_stream *stream,
struct urb *urb
)
{
unsigned i;
dma_addr_t dma = urb->transfer_dma;
/* how many uframes are needed for these transfers */
iso_sched->span = urb->number_of_packets * stream->uperiod;
/* figure out per-uframe itd fields that we'll need later
* when we fit new itds into the schedule.
*/
for (i = 0; i < urb->number_of_packets; i++) {
struct ehci_iso_packet *uframe = &iso_sched->packet[i];
unsigned length;
dma_addr_t buf;
u32 trans;
length = urb->iso_frame_desc[i].length;
buf = dma + urb->iso_frame_desc[i].offset;
trans = EHCI_ISOC_ACTIVE;
trans |= buf & 0x0fff;
if (unlikely(((i + 1) == urb->number_of_packets))
&& !(urb->transfer_flags & URB_NO_INTERRUPT))
trans |= EHCI_ITD_IOC;
trans |= length << 16;
uframe->transaction = cpu_to_hc32(ehci, trans);
/* might need to cross a buffer page within a uframe */
uframe->bufp = (buf & ~(u64)0x0fff);
buf += length;
if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
uframe->cross = 1;
}
}
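/*
 * Editorial sketch of the per-packet page-cross test in itd_sched_init():
 * the page of the buffer start is compared with the page of the end
 * pointer (start + length), matching the code above. Hypothetical helper.
 */
static inline int example_crosses_page(u64 buf, unsigned length)
{
	return (buf & ~(u64)0x0fff) != ((buf + length) & ~(u64)0x0fff);
}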
static void
iso_sched_free(
struct ehci_iso_stream *stream,
struct ehci_iso_sched *iso_sched
)
{
if (!iso_sched)
return;
/* caller must hold ehci->lock! */
list_splice(&iso_sched->td_list, &stream->free_list);
kfree(iso_sched);
}
static int
itd_urb_transaction(
struct ehci_iso_stream *stream,
struct ehci_hcd *ehci,
struct urb *urb,
gfp_t mem_flags
)
{
struct ehci_itd *itd;
dma_addr_t itd_dma;
int i;
unsigned num_itds;
struct ehci_iso_sched *sched;
unsigned long flags;
sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
if (unlikely(sched == NULL))
return -ENOMEM;
itd_sched_init(ehci, sched, stream, urb);
if (urb->interval < 8)
num_itds = 1 + (sched->span + 7) / 8;
else
num_itds = urb->number_of_packets;
/* allocate/init ITDs */
spin_lock_irqsave(&ehci->lock, flags);
for (i = 0; i < num_itds; i++) {
/*
* Use iTDs from the free list, but not iTDs that may
* still be in use by the hardware.
*/
if (likely(!list_empty(&stream->free_list))) {
itd = list_first_entry(&stream->free_list,
struct ehci_itd, itd_list);
if (itd->frame == ehci->now_frame)
goto alloc_itd;
list_del(&itd->itd_list);
itd_dma = itd->itd_dma;
} else {
alloc_itd:
spin_unlock_irqrestore(&ehci->lock, flags);
itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
&itd_dma);
spin_lock_irqsave(&ehci->lock, flags);
if (!itd) {
iso_sched_free(stream, sched);
spin_unlock_irqrestore(&ehci->lock, flags);
return -ENOMEM;
}
}
memset(itd, 0, sizeof(*itd));
itd->itd_dma = itd_dma;
itd->frame = NO_FRAME;
list_add(&itd->itd_list, &sched->td_list);
}
spin_unlock_irqrestore(&ehci->lock, flags);
/* temporarily store schedule info in hcpriv */
urb->hcpriv = sched;
urb->error_count = 0;
return 0;
}
/*-------------------------------------------------------------------------*/
static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
struct ehci_iso_stream *stream, int sign)
{
unsigned uframe;
unsigned i, j;
unsigned s_mask, c_mask, m;
int usecs = stream->ps.usecs;
int c_usecs = stream->ps.c_usecs;
int tt_usecs = stream->ps.tt_usecs;
struct ehci_tt *tt;
if (stream->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
return;
uframe = stream->ps.bw_phase << 3;
bandwidth_dbg(ehci, sign, "iso", &stream->ps);
if (sign < 0) { /* Release bandwidth */
usecs = -usecs;
c_usecs = -c_usecs;
tt_usecs = -tt_usecs;
}
if (!stream->splits) { /* High speed */
for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
i += stream->ps.bw_uperiod)
ehci->bandwidth[i] += usecs;
} else { /* Full speed */
s_mask = stream->ps.cs_mask;
c_mask = s_mask >> 8;
/* NOTE: adjustment needed for frame overflow */
for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
i += stream->ps.bw_uperiod) {
for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
(++j, m <<= 1)) {
if (s_mask & m)
ehci->bandwidth[i+j] += usecs;
else if (c_mask & m)
ehci->bandwidth[i+j] += c_usecs;
}
}
tt = find_tt(stream->ps.udev);
if (sign > 0)
list_add_tail(&stream->ps.ps_list, &tt->ps_list);
else
list_del(&stream->ps.ps_list);
for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
i += stream->ps.bw_period)
tt->bandwidth[i] += tt_usecs;
}
}
static inline int
itd_slot_ok(
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
unsigned uframe
)
{
unsigned usecs;
/* convert "usecs we need" to "max already claimed" */
usecs = ehci->uframe_periodic_max - stream->ps.usecs;
for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
uframe += stream->ps.bw_uperiod) {
if (ehci->bandwidth[uframe] > usecs)
return 0;
}
return 1;
}
static inline int
sitd_slot_ok(
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
unsigned uframe,
struct ehci_iso_sched *sched,
struct ehci_tt *tt
)
{
unsigned mask, tmp;
unsigned frame, uf;
mask = stream->ps.cs_mask << (uframe & 7);
/* for OUT, don't wrap SSPLIT into H-microframe 7 */
if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
return 0;
/* for IN, don't wrap CSPLIT into the next frame */
if (mask & ~0xffff)
return 0;
/* check bandwidth */
uframe &= stream->ps.bw_uperiod - 1;
frame = uframe >> 3;
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
/* The tt's fullspeed bus bandwidth must be available.
* tt_available scheduling guarantees 10+% for control/bulk.
*/
uf = uframe & 7;
if (!tt_available(ehci, &stream->ps, tt, frame, uf))
return 0;
#else
/* tt must be idle for start(s), any gap, and csplit.
* assume scheduling slop leaves 10+% for control/bulk.
*/
if (!tt_no_collision(ehci, stream->ps.bw_period,
stream->ps.udev, frame, mask))
return 0;
#endif
do {
unsigned max_used;
unsigned i;
/* check starts (OUT uses more than one) */
uf = uframe;
max_used = ehci->uframe_periodic_max - stream->ps.usecs;
for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
if (ehci->bandwidth[uf] > max_used)
return 0;
}
/* for IN, check CSPLIT */
if (stream->ps.c_usecs) {
max_used = ehci->uframe_periodic_max -
stream->ps.c_usecs;
uf = uframe & ~7;
tmp = 1 << (2+8);
for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
if ((stream->ps.cs_mask & tmp) == 0)
continue;
if (ehci->bandwidth[uf+i] > max_used)
return 0;
}
}
uframe += stream->ps.bw_uperiod;
} while (uframe < EHCI_BANDWIDTH_SIZE);
stream->ps.cs_mask <<= uframe & 7;
stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
return 1;
}
/*
* This scheduler plans almost as far into the future as it has actual
* periodic schedule slots. (Affected by TUNE_FLS, which defaults to
* "as small as possible" to be cache-friendlier.) That limits the size
* transfers you can stream reliably; avoid more than 64 msec per urb.
* Also avoid queue depths of less than ehci's worst irq latency (affected
* by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
* and other factors); or more than about 230 msec total (for portability,
* given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler!
*/
static int
iso_stream_schedule(
struct ehci_hcd *ehci,
struct urb *urb,
struct ehci_iso_stream *stream
)
{
u32 now, base, next, start, period, span, now2;
u32 wrap = 0, skip = 0;
int status = 0;
unsigned mod = ehci->periodic_size << 3;
struct ehci_iso_sched *sched = urb->hcpriv;
bool empty = list_empty(&stream->td_list);
bool new_stream = false;
period = stream->uperiod;
span = sched->span;
if (!stream->highspeed)
span <<= 3;
/* Start a new isochronous stream? */
if (unlikely(empty && !hcd_periodic_completion_in_progress(
ehci_to_hcd(ehci), urb->ep))) {
/* Schedule the endpoint */
if (stream->ps.phase == NO_FRAME) {
int done = 0;
struct ehci_tt *tt = find_tt(stream->ps.udev);
if (IS_ERR(tt)) {
status = PTR_ERR(tt);
goto fail;
}
compute_tt_budget(ehci->tt_budget, tt);
start = ((-(++ehci->random_frame)) << 3) & (period - 1);
/* find a uframe slot with enough bandwidth.
* Early uframes are more precious because full-speed
* iso IN transfers can't use late uframes,
* and therefore they should be allocated last.
*/
next = start;
start += period;
do {
start--;
/* check schedule: enough space? */
if (stream->highspeed) {
if (itd_slot_ok(ehci, stream, start))
done = 1;
} else {
if ((start % 8) >= 6)
continue;
if (sitd_slot_ok(ehci, stream, start,
sched, tt))
done = 1;
}
} while (start > next && !done);
/* no room in the schedule */
if (!done) {
ehci_dbg(ehci, "iso sched full %p", urb);
status = -ENOSPC;
goto fail;
}
stream->ps.phase = (start >> 3) &
(stream->ps.period - 1);
stream->ps.bw_phase = stream->ps.phase &
(stream->ps.bw_period - 1);
stream->ps.phase_uf = start & 7;
reserve_release_iso_bandwidth(ehci, stream, 1);
}
/* New stream is already scheduled; use the upcoming slot */
else {
start = (stream->ps.phase << 3) + stream->ps.phase_uf;
}
stream->next_uframe = start;
new_stream = true;
}
now = ehci_read_frame_index(ehci) & (mod - 1);
/* Take the isochronous scheduling threshold into account */
if (ehci->i_thresh)
next = now + ehci->i_thresh; /* uframe cache */
else
next = (now + 2 + 7) & ~0x07; /* full frame cache */
/* If needed, initialize last_iso_frame so that this URB will be seen */
if (ehci->isoc_count == 0)
ehci->last_iso_frame = now >> 3;
/*
* Use ehci->last_iso_frame as the base. There can't be any
* TDs scheduled for earlier than that.
*/
base = ehci->last_iso_frame << 3;
next = (next - base) & (mod - 1);
start = (stream->next_uframe - base) & (mod - 1);
if (unlikely(new_stream))
goto do_ASAP;
/*
* Typical case: reuse current schedule, stream may still be active.
* Hopefully there are no gaps from the host falling behind
* (irq delays etc). If there are, the behavior depends on
* whether URB_ISO_ASAP is set.
*/
now2 = (now - base) & (mod - 1);
/* Is the schedule about to wrap around? */
if (unlikely(!empty && start < period)) {
ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
urb, stream->next_uframe, base, period, mod);
status = -EFBIG;
goto fail;
}
/* Is the next packet scheduled after the base time? */
if (likely(!empty || start <= now2 + period)) {
/* URB_ISO_ASAP: make sure that start >= next */
if (unlikely(start < next &&
(urb->transfer_flags & URB_ISO_ASAP)))
goto do_ASAP;
/* Otherwise use start, if it's not in the past */
if (likely(start >= now2))
goto use_start;
/* Otherwise we got an underrun while the queue was empty */
} else {
if (urb->transfer_flags & URB_ISO_ASAP)
goto do_ASAP;
wrap = mod;
now2 += mod;
}
/* How many uframes and packets do we need to skip? */
skip = (now2 - start + period - 1) & -period;
if (skip >= span) { /* Entirely in the past? */
ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
urb, start + base, span - period, now2 + base,
base);
/* Try to keep the last TD intact for scanning later */
skip = span - period;
/* Will it come before the current scan position? */
if (empty) {
skip = span; /* Skip the entire URB */
status = 1; /* and give it back immediately */
iso_sched_free(stream, sched);
sched = NULL;
}
}
urb->error_count = skip / period;
if (sched)
sched->first_packet = urb->error_count;
goto use_start;
do_ASAP:
/* Use the first slot after "next" */
start = next + ((start - next) & (period - 1));
use_start:
/* Tried to schedule too far into the future? */
if (unlikely(start + span - period >= mod + wrap)) {
ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
urb, start, span - period, mod + wrap);
status = -EFBIG;
goto fail;
}
start += base;
stream->next_uframe = (start + skip) & (mod - 1);
/* report high speed start in uframes; full speed, in frames */
urb->start_frame = start & (mod - 1);
if (!stream->highspeed)
urb->start_frame >>= 3;
return status;
fail:
iso_sched_free(stream, sched);
urb->hcpriv = NULL;
return status;
}
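/*
 * Editorial sketch of the do_ASAP arithmetic in iso_stream_schedule():
 * valid starting slots repeat every "period" uframes, so the first slot
 * at or after "next" that preserves the stream's phase is
 * next + ((start - next) mod period). Hypothetical helper; assumes the
 * power-of-two periods EHCI uses, so the mod is a mask.
 */
static inline u32 example_first_slot_after(u32 start, u32 next, u32 period)
{
	return next + ((start - next) & (period - 1));
}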
/*-------------------------------------------------------------------------*/
static inline void
itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
struct ehci_itd *itd)
{
int i;
/* it's been recently zeroed */
itd->hw_next = EHCI_LIST_END(ehci);
itd->hw_bufp[0] = stream->buf0;
itd->hw_bufp[1] = stream->buf1;
itd->hw_bufp[2] = stream->buf2;
for (i = 0; i < 8; i++)
itd->index[i] = -1;
/* All other fields are filled when scheduling */
}
static inline void
itd_patch(
struct ehci_hcd *ehci,
struct ehci_itd *itd,
struct ehci_iso_sched *iso_sched,
unsigned index,
u16 uframe
)
{
struct ehci_iso_packet *uf = &iso_sched->packet[index];
unsigned pg = itd->pg;
/* BUG_ON(pg == 6 && uf->cross); */
uframe &= 0x07;
itd->index[uframe] = index;
itd->hw_transaction[uframe] = uf->transaction;
itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
/* iso_frame_desc[].offset must be strictly increasing */
if (unlikely(uf->cross)) {
u64 bufp = uf->bufp + 4096;
itd->pg = ++pg;
itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
}
}
static inline void
itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
union ehci_shadow *prev = &ehci->pshadow[frame];
__hc32 *hw_p = &ehci->periodic[frame];
union ehci_shadow here = *prev;
__hc32 type = 0;
/* skip any iso nodes which might belong to previous microframes */
while (here.ptr) {
type = Q_NEXT_TYPE(ehci, *hw_p);
if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
break;
prev = periodic_next_shadow(ehci, prev, type);
hw_p = shadow_next_periodic(ehci, &here, type);
here = *prev;
}
itd->itd_next = here;
itd->hw_next = *hw_p;
prev->itd = itd;
itd->frame = frame;
wmb();
*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}
/* fit urb's itds into the selected schedule slot; activate as needed */
static void itd_link_urb(
struct ehci_hcd *ehci,
struct urb *urb,
unsigned mod,
struct ehci_iso_stream *stream
)
{
int packet;
unsigned next_uframe, uframe, frame;
struct ehci_iso_sched *iso_sched = urb->hcpriv;
struct ehci_itd *itd;
next_uframe = stream->next_uframe & (mod - 1);
if (unlikely(list_empty(&stream->td_list)))
ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
if (ehci->amd_pll_fix == 1)
usb_amd_quirk_pll_disable();
}
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill iTDs uframe by uframe */
for (packet = iso_sched->first_packet, itd = NULL;
packet < urb->number_of_packets;) {
if (itd == NULL) {
/* ASSERT: we have all necessary itds */
/* BUG_ON(list_empty(&iso_sched->td_list)); */
/* ASSERT: no itds for this endpoint in this uframe */
itd = list_entry(iso_sched->td_list.next,
struct ehci_itd, itd_list);
list_move_tail(&itd->itd_list, &stream->td_list);
itd->stream = stream;
itd->urb = urb;
itd_init(ehci, stream, itd);
}
uframe = next_uframe & 0x07;
frame = next_uframe >> 3;
itd_patch(ehci, itd, iso_sched, packet, uframe);
next_uframe += stream->uperiod;
next_uframe &= mod - 1;
packet++;
/* link completed itds into the schedule */
if (((next_uframe >> 3) != frame)
|| packet == urb->number_of_packets) {
itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
itd = NULL;
}
}
stream->next_uframe = next_uframe;
/* don't need that schedule data any more */
iso_sched_free(stream, iso_sched);
urb->hcpriv = stream;
++ehci->isoc_count;
enable_periodic(ehci);
}
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
/* Process and recycle a completed ITD. Return true iff its urb completed,
* and hence its completion callback probably added things to the hardware
* schedule.
*
* Note that we carefully avoid recycling this descriptor until after any
* completion callback runs, so that it won't be reused quickly. That is,
* assuming (a) no more than two urbs per frame on this endpoint, and also
* (b) only this endpoint's completions submit URBs. It seems some silicon
* corrupts things if you reuse completed descriptors very quickly...
*/
static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
{
struct urb *urb = itd->urb;
struct usb_iso_packet_descriptor *desc;
u32 t;
unsigned uframe;
int urb_index = -1;
struct ehci_iso_stream *stream = itd->stream;
struct usb_device *dev;
bool retval = false;
/* for each uframe with a packet */
for (uframe = 0; uframe < 8; uframe++) {
if (likely(itd->index[uframe] == -1))
continue;
urb_index = itd->index[uframe];
desc = &urb->iso_frame_desc[urb_index];
t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
itd->hw_transaction[uframe] = 0;
/* report transfer status */
if (unlikely(t & ISO_ERRS)) {
urb->error_count++;
if (t & EHCI_ISOC_BUF_ERR)
desc->status = usb_pipein(urb->pipe)
? -ENOSR /* hc couldn't read */
: -ECOMM; /* hc couldn't write */
else if (t & EHCI_ISOC_BABBLE)
desc->status = -EOVERFLOW;
else /* (t & EHCI_ISOC_XACTERR) */
desc->status = -EPROTO;
/* HC need not update length with this error */
if (!(t & EHCI_ISOC_BABBLE)) {
desc->actual_length = EHCI_ITD_LENGTH(t);
urb->actual_length += desc->actual_length;
}
} else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
desc->status = 0;
desc->actual_length = EHCI_ITD_LENGTH(t);
urb->actual_length += desc->actual_length;
} else {
/* URB was too late */
urb->error_count++;
}
}
/* handle completion now? */
if (likely((urb_index + 1) != urb->number_of_packets))
goto done;
/*
* ASSERT: it's really the last itd for this urb
* list_for_each_entry (itd, &stream->td_list, itd_list)
* BUG_ON(itd->urb == urb);
*/
/* give urb back to the driver; completion often (re)submits */
dev = urb->dev;
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
--ehci->isoc_count;
disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
if (ehci->amd_pll_fix == 1)
usb_amd_quirk_pll_enable();
}
if (unlikely(list_is_singular(&stream->td_list)))
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
done:
itd->urb = NULL;
/* Add to the end of the free list for later reuse */
list_move_tail(&itd->itd_list, &stream->free_list);
/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
if (list_empty(&stream->td_list)) {
list_splice_tail_init(&stream->free_list,
&ehci->cached_itd_list);
start_free_itds(ehci);
}
return retval;
}
/*-------------------------------------------------------------------------*/
static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
gfp_t mem_flags)
{
int status = -EINVAL;
unsigned long flags;
struct ehci_iso_stream *stream;
/* Get iso_stream head */
stream = iso_stream_find(ehci, urb);
if (unlikely(stream == NULL)) {
ehci_dbg(ehci, "can't get iso stream\n");
return -ENOMEM;
}
if (unlikely(urb->interval != stream->uperiod)) {
ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
stream->uperiod, urb->interval);
goto done;
}
#ifdef EHCI_URB_TRACE
ehci_dbg(ehci,
"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
__func__, urb->dev->devpath, urb,
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
urb->transfer_buffer_length,
urb->number_of_packets, urb->interval,
stream);
#endif
/* allocate ITDs w/o locking anything */
status = itd_urb_transaction(stream, ehci, urb, mem_flags);
if (unlikely(status < 0)) {
ehci_dbg(ehci, "can't init itds\n");
goto done;
}
/* schedule ... need to lock */
spin_lock_irqsave(&ehci->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
if (unlikely(status))
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
if (likely(status == 0)) {
itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
} else if (status > 0) {
status = 0;
ehci_urb_done(ehci, urb, 0);
} else {
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
}
done_not_linked:
spin_unlock_irqrestore(&ehci->lock, flags);
done:
return status;
}
/*-------------------------------------------------------------------------*/
/*
* "Split ISO TDs" ... used for USB 1.1 devices going through the
* TTs in USB 2.0 hubs. These need microframe scheduling.
*/
static inline void
sitd_sched_init(
struct ehci_hcd *ehci,
struct ehci_iso_sched *iso_sched,
struct ehci_iso_stream *stream,
struct urb *urb
)
{
unsigned i;
dma_addr_t dma = urb->transfer_dma;
/* how many frames are needed for these transfers */
iso_sched->span = urb->number_of_packets * stream->ps.period;
/* figure out per-frame sitd fields that we'll need later
* when we fit new sitds into the schedule.
*/
for (i = 0; i < urb->number_of_packets; i++) {
struct ehci_iso_packet *packet = &iso_sched->packet[i];
unsigned length;
dma_addr_t buf;
u32 trans;
length = urb->iso_frame_desc[i].length & 0x03ff;
buf = dma + urb->iso_frame_desc[i].offset;
trans = SITD_STS_ACTIVE;
if (((i + 1) == urb->number_of_packets)
&& !(urb->transfer_flags & URB_NO_INTERRUPT))
trans |= SITD_IOC;
trans |= length << 16;
packet->transaction = cpu_to_hc32(ehci, trans);
/* might need to cross a buffer page within a td */
packet->bufp = buf;
packet->buf1 = (buf + length) & ~0x0fff;
if (packet->buf1 != (buf & ~(u64)0x0fff))
packet->cross = 1;
/* OUT uses multiple start-splits */
if (stream->bEndpointAddress & USB_DIR_IN)
continue;
length = (length + 187) / 188;
if (length > 1) /* BEGIN vs ALL */
length |= 1 << 3;
packet->buf1 |= length;
}
}
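/*
 * Editorial sketch of the start-split count used for full-speed OUT in
 * sitd_sched_init() above: each start-split carries at most 188 bytes on
 * the high-speed bus, so an OUT packet of "length" bytes needs
 * ceil(length / 188) start-splits. Hypothetical helper, illustration only.
 */
static inline unsigned example_out_start_splits(unsigned length)
{
	return (length + 187) / 188;
}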
static int
sitd_urb_transaction(
struct ehci_iso_stream *stream,
struct ehci_hcd *ehci,
struct urb *urb,
gfp_t mem_flags
)
{
struct ehci_sitd *sitd;
dma_addr_t sitd_dma;
int i;
struct ehci_iso_sched *iso_sched;
unsigned long flags;
iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
if (iso_sched == NULL)
return -ENOMEM;
sitd_sched_init(ehci, iso_sched, stream, urb);
/* allocate/init sITDs */
spin_lock_irqsave(&ehci->lock, flags);
for (i = 0; i < urb->number_of_packets; i++) {
/* NOTE: for now, we don't try to handle wraparound cases
* for IN (using sitd->hw_backpointer, like a FSTN), which
* means we never need two sitds for full speed packets.
*/
/*
* Use siTDs from the free list, but not siTDs that may
* still be in use by the hardware.
*/
if (likely(!list_empty(&stream->free_list))) {
sitd = list_first_entry(&stream->free_list,
struct ehci_sitd, sitd_list);
if (sitd->frame == ehci->now_frame)
goto alloc_sitd;
list_del(&sitd->sitd_list);
sitd_dma = sitd->sitd_dma;
} else {
alloc_sitd:
spin_unlock_irqrestore(&ehci->lock, flags);
sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
&sitd_dma);
spin_lock_irqsave(&ehci->lock, flags);
if (!sitd) {
iso_sched_free(stream, iso_sched);
spin_unlock_irqrestore(&ehci->lock, flags);
return -ENOMEM;
}
}
memset(sitd, 0, sizeof(*sitd));
sitd->sitd_dma = sitd_dma;
sitd->frame = NO_FRAME;
list_add(&sitd->sitd_list, &iso_sched->td_list);
}
/* temporarily store schedule info in hcpriv */
urb->hcpriv = iso_sched;
urb->error_count = 0;
spin_unlock_irqrestore(&ehci->lock, flags);
return 0;
}
/*-------------------------------------------------------------------------*/
static inline void
sitd_patch(
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
struct ehci_sitd *sitd,
struct ehci_iso_sched *iso_sched,
unsigned index
)
{
struct ehci_iso_packet *uf = &iso_sched->packet[index];
u64 bufp;
sitd->hw_next = EHCI_LIST_END(ehci);
sitd->hw_fullspeed_ep = stream->address;
sitd->hw_uframe = stream->splits;
sitd->hw_results = uf->transaction;
sitd->hw_backpointer = EHCI_LIST_END(ehci);
bufp = uf->bufp;
sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
if (uf->cross)
bufp += 4096;
sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
sitd->index = index;
}
static inline void
sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
sitd->sitd_next = ehci->pshadow[frame];
sitd->hw_next = ehci->periodic[frame];
ehci->pshadow[frame].sitd = sitd;
sitd->frame = frame;
wmb();
ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}
/* fit urb's sitds into the selected schedule slot; activate as needed */
static void sitd_link_urb(
struct ehci_hcd *ehci,
struct urb *urb,
unsigned mod,
struct ehci_iso_stream *stream
)
{
int packet;
unsigned next_uframe;
struct ehci_iso_sched *sched = urb->hcpriv;
struct ehci_sitd *sitd;
next_uframe = stream->next_uframe;
if (list_empty(&stream->td_list))
/* usbfs ignores TT bandwidth */
ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
if (ehci->amd_pll_fix == 1)
usb_amd_quirk_pll_disable();
}
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill sITDs frame by frame */
for (packet = sched->first_packet, sitd = NULL;
packet < urb->number_of_packets;
packet++) {
/* ASSERT: we have all necessary sitds */
BUG_ON(list_empty(&sched->td_list));
/* ASSERT: no itds for this endpoint in this frame */
sitd = list_entry(sched->td_list.next,
struct ehci_sitd, sitd_list);
list_move_tail(&sitd->sitd_list, &stream->td_list);
sitd->stream = stream;
sitd->urb = urb;
sitd_patch(ehci, stream, sitd, sched, packet);
sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
sitd);
next_uframe += stream->uperiod;
}
stream->next_uframe = next_uframe & (mod - 1);
/* don't need that schedule data any more */
iso_sched_free(stream, sched);
urb->hcpriv = stream;
++ehci->isoc_count;
enable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/
#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
| SITD_STS_XACT | SITD_STS_MMF)
/* Process and recycle a completed SITD. Return true iff its urb completed,
* and hence its completion callback probably added things to the hardware
* schedule.
*
* Note that we carefully avoid recycling this descriptor until after any
* completion callback runs, so that it won't be reused quickly. That is,
* assuming (a) no more than two urbs per frame on this endpoint, and also
* (b) only this endpoint's completions submit URBs. It seems some silicon
* corrupts things if you reuse completed descriptors very quickly...
*/
static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
struct urb *urb = sitd->urb;
struct usb_iso_packet_descriptor *desc;
u32 t;
int urb_index;
struct ehci_iso_stream *stream = sitd->stream;
struct usb_device *dev;
bool retval = false;
urb_index = sitd->index;
desc = &urb->iso_frame_desc[urb_index];
t = hc32_to_cpup(ehci, &sitd->hw_results);
/* report transfer status */
if (unlikely(t & SITD_ERRS)) {
urb->error_count++;
if (t & SITD_STS_DBE)
desc->status = usb_pipein(urb->pipe)
? -ENOSR /* hc couldn't read */
: -ECOMM; /* hc couldn't write */
else if (t & SITD_STS_BABBLE)
desc->status = -EOVERFLOW;
else /* XACT, MMF, etc */
desc->status = -EPROTO;
} else if (unlikely(t & SITD_STS_ACTIVE)) {
/* URB was too late */
urb->error_count++;
} else {
desc->status = 0;
desc->actual_length = desc->length - SITD_LENGTH(t);
urb->actual_length += desc->actual_length;
}
/* handle completion now? */
if ((urb_index + 1) != urb->number_of_packets)
goto done;
/*
* ASSERT: it's really the last sitd for this urb
* list_for_each_entry (sitd, &stream->td_list, sitd_list)
* BUG_ON(sitd->urb == urb);
*/
/* give urb back to the driver; completion often (re)submits */
dev = urb->dev;
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
--ehci->isoc_count;
disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
if (ehci->amd_pll_fix == 1)
usb_amd_quirk_pll_enable();
}
if (list_is_singular(&stream->td_list))
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
done:
sitd->urb = NULL;
/* Add to the end of the free list for later reuse */
list_move_tail(&sitd->sitd_list, &stream->free_list);
/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
if (list_empty(&stream->td_list)) {
list_splice_tail_init(&stream->free_list,
&ehci->cached_sitd_list);
start_free_itds(ehci);
}
return retval;
}
static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
gfp_t mem_flags)
{
int status = -EINVAL;
unsigned long flags;
struct ehci_iso_stream *stream;
/* Get iso_stream head */
stream = iso_stream_find(ehci, urb);
if (stream == NULL) {
ehci_dbg(ehci, "can't get iso stream\n");
return -ENOMEM;
}
if (urb->interval != stream->ps.period) {
ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
stream->ps.period, urb->interval);
goto done;
}
#ifdef EHCI_URB_TRACE
ehci_dbg(ehci,
"submit %p dev%s ep%d%s-iso len %d\n",
urb, urb->dev->devpath,
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
urb->transfer_buffer_length);
#endif
/* allocate SITDs */
status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
if (status < 0) {
ehci_dbg(ehci, "can't init sitds\n");
goto done;
}
/* schedule ... need to lock */
spin_lock_irqsave(&ehci->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
}
status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
if (unlikely(status))
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
if (likely(status == 0)) {
sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
} else if (status > 0) {
status = 0;
ehci_urb_done(ehci, urb, 0);
} else {
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
}
done_not_linked:
spin_unlock_irqrestore(&ehci->lock, flags);
done:
return status;
}
/*-------------------------------------------------------------------------*/
static void scan_isoc(struct ehci_hcd *ehci)
{
unsigned uf, now_frame, frame;
unsigned fmask = ehci->periodic_size - 1;
bool modified, live;
union ehci_shadow q, *q_p;
__hc32 type, *hw_p;
/*
* When running, scan from last scan point up to "now"
* else clean up by scanning everything that's left.
* Touches as few pages as possible: cache-friendly.
*/
if (ehci->rh_state >= EHCI_RH_RUNNING) {
uf = ehci_read_frame_index(ehci);
now_frame = (uf >> 3) & fmask;
live = true;
} else {
now_frame = (ehci->last_iso_frame - 1) & fmask;
live = false;
}
ehci->now_frame = now_frame;
frame = ehci->last_iso_frame;
restart:
/* Scan each element in frame's queue for completions */
q_p = &ehci->pshadow[frame];
hw_p = &ehci->periodic[frame];
q.ptr = q_p->ptr;
type = Q_NEXT_TYPE(ehci, *hw_p);
modified = false;
while (q.ptr != NULL) {
switch (hc32_to_cpu(ehci, type)) {
case Q_TYPE_ITD:
/*
* If this ITD is still active, leave it for
* later processing ... check the next entry.
* No need to check for activity unless the
* frame is current.
*/
if (frame == now_frame && live) {
rmb();
for (uf = 0; uf < 8; uf++) {
if (q.itd->hw_transaction[uf] &
ITD_ACTIVE(ehci))
break;
}
if (uf < 8) {
q_p = &q.itd->itd_next;
hw_p = &q.itd->hw_next;
type = Q_NEXT_TYPE(ehci,
q.itd->hw_next);
q = *q_p;
break;
}
}
/*
* Take finished ITDs out of the schedule
* and process them: recycle, maybe report
* URB completion. HC won't cache the
* pointer for much longer, if at all.
*/
*q_p = q.itd->itd_next;
if (!ehci->use_dummy_qh ||
q.itd->hw_next != EHCI_LIST_END(ehci))
*hw_p = q.itd->hw_next;
else
*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
wmb();
modified = itd_complete(ehci, q.itd);
q = *q_p;
break;
case Q_TYPE_SITD:
/*
* If this SITD is still active, leave it for
* later processing ... check the next entry.
* No need to check for activity unless the
* frame is current.
*/
if (((frame == now_frame) ||
(((frame + 1) & fmask) == now_frame))
&& live
&& (q.sitd->hw_results & SITD_ACTIVE(ehci))) {
q_p = &q.sitd->sitd_next;
hw_p = &q.sitd->hw_next;
type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
q = *q_p;
break;
}
/*
* Take finished SITDs out of the schedule
* and process them: recycle, maybe report
* URB completion.
*/
*q_p = q.sitd->sitd_next;
if (!ehci->use_dummy_qh ||
q.sitd->hw_next != EHCI_LIST_END(ehci))
*hw_p = q.sitd->hw_next;
else
*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
wmb();
modified = sitd_complete(ehci, q.sitd);
q = *q_p;
break;
default:
ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
type, frame, q.ptr);
/* BUG(); */
/* FALL THROUGH */
case Q_TYPE_QH:
case Q_TYPE_FSTN:
/* End of the iTDs and siTDs */
q.ptr = NULL;
break;
}
/* Assume completion callbacks modify the queue */
if (unlikely(modified && ehci->isoc_count > 0))
goto restart;
}
/* Stop when we have reached the current frame */
if (frame == now_frame)
return;
/* The last frame may still have active siTDs */
ehci->last_iso_frame = frame;
frame = (frame + 1) & fmask;
goto restart;
}
| gpl-2.0 |
gospo/net-next | block/t10-pi.c | 467 | 4964 | /*
* t10_pi.c - Functions for generating and verifying T10 Protection
* Information.
*
* Copyright (C) 2007, 2008, 2014 Oracle Corporation
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
* USA.
*
*/
#include <linux/t10-pi.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>
typedef __be16 (csum_fn) (void *, unsigned int);
static const __be16 APP_ESCAPE = (__force __be16) 0xffff;
static const __be32 REF_ESCAPE = (__force __be32) 0xffffffff;
static __be16 t10_pi_crc_fn(void *data, unsigned int len)
{
return cpu_to_be16(crc_t10dif(data, len));
}
static __be16 t10_pi_ip_fn(void *data, unsigned int len)
{
return (__force __be16)ip_compute_csum(data, len);
}
/*
* Type 1 and Type 2 protection use the same format: 16 bit guard tag,
* 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
* tag.
*/
static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn,
unsigned int type)
{
unsigned int i;
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
struct t10_pi_tuple *pi = iter->prot_buf;
pi->guard_tag = fn(iter->data_buf, iter->interval);
pi->app_tag = 0;
if (type == 1)
pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
else
pi->ref_tag = 0;
iter->data_buf += iter->interval;
iter->prot_buf += sizeof(struct t10_pi_tuple);
iter->seed++;
}
return 0;
}
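/*
 * Editorial example (not part of this file's API): how one Type 1 tuple
 * would be filled for a single interval, mirroring the loop body of
 * t10_pi_generate() above with the CRC guard function. The helper name
 * and the explicit "lba" parameter are hypothetical.
 */
static inline void example_fill_type1_tuple(struct t10_pi_tuple *pi,
					    void *data, unsigned int interval,
					    u32 lba)
{
	pi->guard_tag = cpu_to_be16(crc_t10dif(data, interval));
	pi->app_tag = 0;			/* app tag unused here */
	pi->ref_tag = cpu_to_be32(lba);		/* low 32 bits of the seed */
}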
static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
unsigned int type)
{
unsigned int i;
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
struct t10_pi_tuple *pi = iter->prot_buf;
__be16 csum;
switch (type) {
case 1:
case 2:
if (pi->app_tag == APP_ESCAPE)
goto next;
if (be32_to_cpu(pi->ref_tag) !=
lower_32_bits(iter->seed)) {
pr_err("%s: ref tag error at location %llu " \
"(rcvd %u)\n", iter->disk_name,
(unsigned long long)
iter->seed, be32_to_cpu(pi->ref_tag));
return -EILSEQ;
}
break;
case 3:
if (pi->app_tag == APP_ESCAPE &&
pi->ref_tag == REF_ESCAPE)
goto next;
break;
}
csum = fn(iter->data_buf, iter->interval);
if (pi->guard_tag != csum) {
pr_err("%s: guard tag error at sector %llu " \
"(rcvd %04x, want %04x)\n", iter->disk_name,
(unsigned long long)iter->seed,
be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
return -EILSEQ;
}
next:
iter->data_buf += iter->interval;
iter->prot_buf += sizeof(struct t10_pi_tuple);
iter->seed++;
}
return 0;
}
static int t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
return t10_pi_generate(iter, t10_pi_crc_fn, 1);
}
static int t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
return t10_pi_generate(iter, t10_pi_ip_fn, 1);
}
static int t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
return t10_pi_verify(iter, t10_pi_crc_fn, 1);
}
static int t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
return t10_pi_verify(iter, t10_pi_ip_fn, 1);
}
static int t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
return t10_pi_generate(iter, t10_pi_crc_fn, 3);
}
static int t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
return t10_pi_generate(iter, t10_pi_ip_fn, 3);
}
static int t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
return t10_pi_verify(iter, t10_pi_crc_fn, 3);
}
static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
return t10_pi_verify(iter, t10_pi_ip_fn, 3);
}
struct blk_integrity_profile t10_pi_type1_crc = {
.name = "T10-DIF-TYPE1-CRC",
.generate_fn = t10_pi_type1_generate_crc,
.verify_fn = t10_pi_type1_verify_crc,
};
EXPORT_SYMBOL(t10_pi_type1_crc);
struct blk_integrity_profile t10_pi_type1_ip = {
.name = "T10-DIF-TYPE1-IP",
.generate_fn = t10_pi_type1_generate_ip,
.verify_fn = t10_pi_type1_verify_ip,
};
EXPORT_SYMBOL(t10_pi_type1_ip);
struct blk_integrity_profile t10_pi_type3_crc = {
.name = "T10-DIF-TYPE3-CRC",
.generate_fn = t10_pi_type3_generate_crc,
.verify_fn = t10_pi_type3_verify_crc,
};
EXPORT_SYMBOL(t10_pi_type3_crc);
struct blk_integrity_profile t10_pi_type3_ip = {
.name = "T10-DIF-TYPE3-IP",
.generate_fn = t10_pi_type3_generate_ip,
.verify_fn = t10_pi_type3_verify_ip,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
| gpl-2.0 |
ramseydsilva/linux | drivers/rtc/rtc-at32ap700x.c | 723 | 6927 | /*
* An RTC driver for the AVR32 AT32AP700x processor series.
*
* Copyright (C) 2007 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/rtc.h>
#include <linux/io.h>
/*
* This is a bare-bones RTC. It runs during most system sleep states, but has
* no battery backup and gets reset during system restart. It must be
* initialized from an external clock (network, I2C, etc) before it can be of
* much use.
*
* The alarm functionality is limited by the hardware, not supporting
* periodic interrupts.
*/
#define RTC_CTRL 0x00
#define RTC_CTRL_EN 0
#define RTC_CTRL_PCLR 1
#define RTC_CTRL_TOPEN 2
#define RTC_CTRL_PSEL 8
#define RTC_VAL 0x04
#define RTC_TOP 0x08
#define RTC_IER 0x10
#define RTC_IER_TOPI 0
#define RTC_IDR 0x14
#define RTC_IDR_TOPI 0
#define RTC_IMR 0x18
#define RTC_IMR_TOPI 0
#define RTC_ISR 0x1c
#define RTC_ISR_TOPI 0
#define RTC_ICR 0x20
#define RTC_ICR_TOPI 0
#define RTC_BIT(name) (1 << RTC_##name)
#define RTC_BF(name, value) ((value) << RTC_##name)
#define rtc_readl(dev, reg) \
__raw_readl((dev)->regs + RTC_##reg)
#define rtc_writel(dev, reg, value) \
__raw_writel((value), (dev)->regs + RTC_##reg)
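/*
 * Editorial example of the token-pasting accessors above (assuming a valid
 * struct rtc_at32ap700x pointer "rtc"):
 *	rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
 * expands to
 *	__raw_writel(1 << 0, rtc->regs + 0x20);
 * i.e. it acknowledges a top-of-counter interrupt by writing ICR bit 0.
 */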
struct rtc_at32ap700x {
struct rtc_device *rtc;
void __iomem *regs;
unsigned long alarm_time;
unsigned long irq;
/* Protect against concurrent register access. */
spinlock_t lock;
};
static int at32_rtc_readtime(struct device *dev, struct rtc_time *tm)
{
struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
unsigned long now;
now = rtc_readl(rtc, VAL);
rtc_time_to_tm(now, tm);
return 0;
}
static int at32_rtc_settime(struct device *dev, struct rtc_time *tm)
{
struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
unsigned long now;
int ret;
ret = rtc_tm_to_time(tm, &now);
if (ret == 0)
rtc_writel(rtc, VAL, now);
return ret;
}
static int at32_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
spin_lock_irq(&rtc->lock);
rtc_time_to_tm(rtc->alarm_time, &alrm->time);
alrm->enabled = rtc_readl(rtc, IMR) & RTC_BIT(IMR_TOPI) ? 1 : 0;
alrm->pending = rtc_readl(rtc, ISR) & RTC_BIT(ISR_TOPI) ? 1 : 0;
spin_unlock_irq(&rtc->lock);
return 0;
}
static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
unsigned long rtc_unix_time;
unsigned long alarm_unix_time;
int ret;
rtc_unix_time = rtc_readl(rtc, VAL);
ret = rtc_tm_to_time(&alrm->time, &alarm_unix_time);
if (ret)
return ret;
if (alarm_unix_time < rtc_unix_time)
return -EINVAL;
spin_lock_irq(&rtc->lock);
rtc->alarm_time = alarm_unix_time;
rtc_writel(rtc, TOP, rtc->alarm_time);
if (alrm->enabled)
rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
| RTC_BIT(CTRL_TOPEN));
else
rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
& ~RTC_BIT(CTRL_TOPEN));
spin_unlock_irq(&rtc->lock);
return ret;
}
static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
int ret = 0;
spin_lock_irq(&rtc->lock);
if (enabled) {
if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
ret = -EINVAL;
goto out;
}
rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
| RTC_BIT(CTRL_TOPEN));
rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
} else {
rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
& ~RTC_BIT(CTRL_TOPEN));
rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
}
out:
spin_unlock_irq(&rtc->lock);
return ret;
}
static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
{
struct rtc_at32ap700x *rtc = (struct rtc_at32ap700x *)dev_id;
unsigned long isr = rtc_readl(rtc, ISR);
unsigned long events = 0;
int ret = IRQ_NONE;
spin_lock(&rtc->lock);
if (isr & RTC_BIT(ISR_TOPI)) {
rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
& ~RTC_BIT(CTRL_TOPEN));
rtc_writel(rtc, VAL, rtc->alarm_time);
events = RTC_AF | RTC_IRQF;
rtc_update_irq(rtc->rtc, 1, events);
ret = IRQ_HANDLED;
}
spin_unlock(&rtc->lock);
return ret;
}
static struct rtc_class_ops at32_rtc_ops = {
.read_time = at32_rtc_readtime,
.set_time = at32_rtc_settime,
.read_alarm = at32_rtc_readalarm,
.set_alarm = at32_rtc_setalarm,
.alarm_irq_enable = at32_rtc_alarm_irq_enable,
};
static int __init at32_rtc_probe(struct platform_device *pdev)
{
struct resource *regs;
struct rtc_at32ap700x *rtc;
int irq;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(struct rtc_at32ap700x),
GFP_KERNEL);
if (!rtc)
return -ENOMEM;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
dev_dbg(&pdev->dev, "no mmio resource defined\n");
return -ENXIO;
}
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_dbg(&pdev->dev, "could not get irq\n");
return -ENXIO;
}
rtc->irq = irq;
rtc->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
if (!rtc->regs) {
dev_dbg(&pdev->dev, "could not map I/O memory\n");
return -ENOMEM;
}
spin_lock_init(&rtc->lock);
/*
* Maybe init RTC: count from zero at 1 Hz, disable wrap irq.
*
* Do not reset VAL register, as it can hold an old time
* from last JTAG reset.
*/
if (!(rtc_readl(rtc, CTRL) & RTC_BIT(CTRL_EN))) {
rtc_writel(rtc, CTRL, RTC_BIT(CTRL_PCLR));
rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
rtc_writel(rtc, CTRL, RTC_BF(CTRL_PSEL, 0xe)
| RTC_BIT(CTRL_EN));
}
ret = devm_request_irq(&pdev->dev, irq, at32_rtc_interrupt, IRQF_SHARED,
"rtc", rtc);
if (ret) {
dev_dbg(&pdev->dev, "could not request irq %d\n", irq);
return ret;
}
platform_set_drvdata(pdev, rtc);
rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&at32_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtc)) {
dev_dbg(&pdev->dev, "could not register rtc device\n");
return PTR_ERR(rtc->rtc);
}
device_init_wakeup(&pdev->dev, 1);
dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
(unsigned long)rtc->regs, rtc->irq);
return 0;
}
static int __exit at32_rtc_remove(struct platform_device *pdev)
{
device_init_wakeup(&pdev->dev, 0);
return 0;
}
MODULE_ALIAS("platform:at32ap700x_rtc");
static struct platform_driver at32_rtc_driver = {
.remove = __exit_p(at32_rtc_remove),
.driver = {
.name = "at32ap700x_rtc",
},
};
module_platform_driver_probe(at32_rtc_driver, at32_rtc_probe);
MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("Real time clock for AVR32 AT32AP700x");
MODULE_LICENSE("GPL");
| gpl-2.0 |
linux-scraping/linux-grsecurity | drivers/ata/pata_imx.c | 1235 | 6454 | /*
* Freescale iMX PATA driver
*
* Copyright (C) 2011 Arnaud Patard <arnaud.patard@rtp-net.org>
*
* Based on pata_platform - Copyright (C) 2006 - 2007 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* TODO:
* - dmaengine support
* - check if timing stuff needed
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#define DRV_NAME "pata_imx"
#define PATA_IMX_ATA_CONTROL 0x24
#define PATA_IMX_ATA_CTRL_FIFO_RST_B (1<<7)
#define PATA_IMX_ATA_CTRL_ATA_RST_B (1<<6)
#define PATA_IMX_ATA_CTRL_IORDY_EN (1<<0)
#define PATA_IMX_ATA_INT_EN 0x2C
#define PATA_IMX_ATA_INTR_ATA_INTRQ2 (1<<3)
#define PATA_IMX_DRIVE_DATA 0xA0
#define PATA_IMX_DRIVE_CONTROL 0xD8
struct pata_imx_priv {
struct clk *clk;
/* timings/interrupt/control regs */
void __iomem *host_regs;
u32 ata_ctl;
};
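/*
 * The controller is driven in PIO mode 0 only; walk the enabled devices
 * on the link and set the IORDY enable bit in the ATA control register
 * according to each device's capability.
 */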
static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
{
struct ata_device *dev;
struct ata_port *ap = link->ap;
struct pata_imx_priv *priv = ap->host->private_data;
u32 val;
ata_for_each_dev(dev, link, ENABLED) {
dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
if (ata_pio_need_iordy(dev))
val |= PATA_IMX_ATA_CTRL_IORDY_EN;
else
val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
__raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
ata_dev_info(dev, "configured for PIO\n");
}
return 0;
}
static struct scsi_host_template pata_imx_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations pata_imx_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = ata_sff_data_xfer_noirq,
.cable_detect = ata_cable_unknown,
.set_mode = pata_imx_set_mode,
};
static void pata_imx_setup_port(struct ata_ioports *ioaddr)
{
/* Fixup the port shift for platforms that need it */
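/* Taskfile registers sit on 32-bit boundaries, hence the << 2 offsets. */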
ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2);
ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2);
ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2);
ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2);
ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2);
ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2);
ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2);
ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2);
ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
}
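/*
 * Probe: grab the IRQ and clock, allocate a single-port libata host,
 * map the controller registers, point the taskfile at the drive
 * data/control windows, deassert the FIFO/ATA resets, unmask INTRQ2
 * and hand the port over to libata.
 */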
static int pata_imx_probe(struct platform_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
struct pata_imx_priv *priv;
int irq = 0;
struct resource *io_res;
int ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
priv = devm_kzalloc(&pdev->dev,
sizeof(struct pata_imx_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "Failed to get clock\n");
return PTR_ERR(priv->clk);
}
ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
ret = -ENOMEM;
goto err;
}
host->private_data = priv;
ap = host->ports[0];
ap->ops = &pata_imx_port_ops;
ap->pio_mask = ATA_PIO0;
ap->flags |= ATA_FLAG_SLAVE_POSS;
io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->host_regs = devm_ioremap_resource(&pdev->dev, io_res);
if (IS_ERR(priv->host_regs)) {
ret = PTR_ERR(priv->host_regs);
goto err;
}
ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA;
ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL;
ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
pata_imx_setup_port(&ap->ioaddr);
ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
(unsigned long long)io_res->start + PATA_IMX_DRIVE_DATA,
(unsigned long long)io_res->start + PATA_IMX_DRIVE_CONTROL);
/* deassert resets */
__raw_writel(PATA_IMX_ATA_CTRL_FIFO_RST_B |
PATA_IMX_ATA_CTRL_ATA_RST_B,
priv->host_regs + PATA_IMX_ATA_CONTROL);
/* enable interrupts */
__raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
priv->host_regs + PATA_IMX_ATA_INT_EN);
/* activate */
ret = ata_host_activate(host, irq, ata_sff_interrupt, 0,
&pata_imx_sht);
if (ret)
goto err;
return 0;
err:
clk_disable_unprepare(priv->clk);
return ret;
}
static int pata_imx_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct pata_imx_priv *priv = host->private_data;
ata_host_detach(host);
__raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
clk_disable_unprepare(priv->clk);
return 0;
}
#ifdef CONFIG_PM_SLEEP
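/*
 * Suspend masks the controller interrupt, saves the ATA control
 * register and gates the clock; resume reverses the sequence and
 * calls ata_host_resume().
 */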
static int pata_imx_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct pata_imx_priv *priv = host->private_data;
int ret;
ret = ata_host_suspend(host, PMSG_SUSPEND);
if (!ret) {
__raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
priv->ata_ctl =
__raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
clk_disable_unprepare(priv->clk);
}
return ret;
}
static int pata_imx_resume(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct pata_imx_priv *priv = host->private_data;
int ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
__raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
__raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
priv->host_regs + PATA_IMX_ATA_INT_EN);
ata_host_resume(host);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(pata_imx_pm_ops, pata_imx_suspend, pata_imx_resume);
static const struct of_device_id imx_pata_dt_ids[] = {
{
.compatible = "fsl,imx27-pata",
}, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,
.remove = pata_imx_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = imx_pata_dt_ids,
.pm = &pata_imx_pm_ops,
},
};
module_platform_driver(pata_imx_driver);
MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
MODULE_DESCRIPTION("low-level driver for iMX PATA");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| gpl-2.0 |
cristianomatos/android_kernel_google_msm | arch/arm/mach-msm/sdio_al_test.c | 1491 | 162794 | /* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* SDIO-Abstraction-Layer Test Module.
*
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/platform_device.h>
#include <mach/sdio_smem.h>
#include <linux/wakelock.h>
#include <linux/uaccess.h>
#include "sdio_al_private.h"
#include <linux/debugfs.h>
#include <linux/kthread.h>
enum lpm_test_msg_type {
LPM_NO_MSG, /* 0 */
LPM_MSG_SEND, /* 1 */
LPM_MSG_REC, /* 2 */
LPM_SLEEP, /* 3 */
LPM_WAKEUP, /* 4 */
LPM_NOTIFY /* 5 */
};
#define LPM_NO_MSG_NAME "LPM No Event"
#define LPM_MSG_SEND_NAME "LPM Send Msg Event"
#define LPM_MSG_REC_NAME "LPM Receive Msg Event"
#define LPM_SLEEP_NAME "LPM Sleep Event"
#define LPM_WAKEUP_NAME "LPM Wakeup Event"
/** Module name string */
#define TEST_MODULE_NAME "sdio_al_test"
#define TEST_SIGNATURE 0x12345678
#define TEST_CONFIG_SIGNATURE 0xBEEFCAFE
#define MAX_XFER_SIZE (16*1024)
#define SMEM_MAX_XFER_SIZE 0xBC000
#define A2_MIN_PACKET_SIZE 5
#define RMNT_PACKET_SIZE (4*1024)
#define DUN_PACKET_SIZE (2*1024)
#define CSVT_PACKET_SIZE 1700
#define TEST_DBG(x...) if (test_ctx->runtime_debug) pr_info(x)
#define LPM_TEST_NUM_OF_PACKETS 100
#define LPM_MAX_OPEN_CHAN_PER_DEV 4
#define LPM_ARRAY_SIZE (7*LPM_TEST_NUM_OF_PACKETS*LPM_MAX_OPEN_CHAN_PER_DEV)
#define SDIO_LPM_TEST "sdio_lpm_test_reading_task"
#define LPM_TEST_CONFIG_SIGNATURE 0xDEADBABE
#define LPM_MSG_NAME_SIZE 20
#define MAX_STR_SIZE 10
#define MAX_AVG_RTT_TIME_USEC 2500
#define SDIO_RMNT_RTT_PACKET_SIZE 32
#define SDIO_CSVT_RTT_PACKET_SIZE 1900
#define A2_HEADER_OVERHEAD 8
enum rx_process_state {
RX_PROCESS_PACKET_INIT,
RX_PROCESS_A2_HEADER,
RX_PROCESS_PACKET_DATA,
};
enum sdio_test_case_type {
SDIO_TEST_LOOPBACK_HOST,
SDIO_TEST_LOOPBACK_CLIENT,
SDIO_TEST_LPM_HOST_WAKER,
SDIO_TEST_LPM_CLIENT_WAKER,
SDIO_TEST_LPM_RANDOM,
SDIO_TEST_HOST_SENDER_NO_LP,
SDIO_TEST_CLOSE_CHANNEL,
SDIO_TEST_A2_VALIDATION,
/* The following tests are not part of the 9k tests and should be
* kept last in case new tests are added
*/
SDIO_TEST_PERF,
SDIO_TEST_RTT,
SDIO_TEST_MODEM_RESET,
};
struct lpm_task {
struct task_struct *lpm_task;
const char *task_name;
};
struct lpm_entry_type {
enum lpm_test_msg_type msg_type;
char msg_name[LPM_MSG_NAME_SIZE];
u32 counter;
u32 current_ms;
u32 read_avail_mask;
char chan_name[CHANNEL_NAME_SIZE];
};
struct lpm_msg {
u32 signature;
u32 counter;
u32 reserve1;
u32 reserve2;
};
struct test_config_msg {
u32 signature;
u32 test_case;
u32 test_param;
u32 num_packets;
u32 num_iterations;
};
struct test_result_msg {
u32 signature;
u32 is_successful;
};
struct test_work {
struct work_struct work;
struct test_channel *test_ch;
};
enum sdio_channels_ids {
SDIO_RPC,
SDIO_QMI,
SDIO_RMNT,
SDIO_DIAG,
SDIO_DUN,
SDIO_SMEM,
SDIO_CSVT,
SDIO_MAX_CHANNELS
};
enum sdio_test_results {
TEST_NO_RESULT,
TEST_FAILED,
TEST_PASSED
};
enum sdio_lpm_vote_state {
SDIO_NO_VOTE,
SDIO_VOTE_FOR_SLEEP,
SDIO_VOTE_AGAINST_SLEEP
};
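/*
 * Per SDIO device bookkeeping: counters of channels still expected to
 * send/receive, the LPM event log array with its protecting spinlock,
 * the reading task used by the LPM tests and the per-device results.
 */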
struct sdio_test_device {
int open_channels_counter_to_recv;
int open_channels_counter_to_send;
struct lpm_entry_type *lpm_arr;
int array_size;
void *sdio_al_device;
spinlock_t lpm_array_lock;
unsigned long lpm_array_lock_flags;
u32 next_avail_entry_in_array;
struct lpm_task lpm_test_task;
u32 next_mask_id;
u32 read_avail_mask;
int modem_result_per_dev;
int final_result_per_dev;
};
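/*
 * Per test channel state: the SDIO channel handle, transfer buffer,
 * dedicated workqueue, notification counters, timers, the config
 * message exchanged with the modem and the per-channel result.
 */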
struct test_channel {
struct sdio_channel *ch;
char name[CHANNEL_NAME_SIZE];
int ch_id;
struct sdio_test_device *test_device;
u32 *buf;
u32 buf_size;
struct workqueue_struct *workqueue;
struct test_work test_work;
u32 rx_bytes;
u32 tx_bytes;
wait_queue_head_t wait_q;
atomic_t rx_notify_count;
atomic_t tx_notify_count;
atomic_t any_notify_count;
atomic_t wakeup_client;
atomic_t card_detected_event;
int wait_counter;
int is_used;
int test_type;
int ch_ready;
struct test_config_msg config_msg;
int test_completed;
int test_result;
struct timer_list timer;
int timer_interval_ms;
struct timer_list timeout_timer;
int timeout_ms;
void *sdio_al_device;
int is_ok_to_sleep;
unsigned int packet_length;
int random_packet_size;
int next_index_in_sent_msg_per_chan;
int channel_mask_id;
int modem_result_per_chan;
int notify_counter_per_chan;
int max_burst_size; /* number of writes before close/open */
int card_removed;
};
struct sdio_al_test_debug {
u32 dun_throughput;
u32 rmnt_throughput;
struct dentry *debug_root;
struct dentry *debug_test_result;
struct dentry *debug_dun_throughput;
struct dentry *debug_rmnt_throughput;
struct dentry *rpc_sender_test;
struct dentry *rpc_qmi_diag_sender_test;
struct dentry *smem_test;
struct dentry *smem_rpc_test;
struct dentry *rmnet_a2_validation_test;
struct dentry *dun_a2_validation_test;
struct dentry *rmnet_a2_perf_test;
struct dentry *dun_a2_perf_test;
struct dentry *csvt_a2_perf_test;
struct dentry *rmnet_dun_a2_perf_test;
struct dentry *rpc_sender_rmnet_a2_perf_test;
struct dentry *all_channels_test;
struct dentry *host_sender_no_lp_diag_test;
struct dentry *host_sender_no_lp_diag_rpc_test;
struct dentry *rmnet_small_packets_test;
struct dentry *rmnet_rtt_test;
struct dentry *csvt_rtt_test;
struct dentry *modem_reset_rpc_test;
struct dentry *modem_reset_rmnet_test;
struct dentry *modem_reset_channels_4bit_dev_test;
struct dentry *modem_reset_channels_8bit_dev_test;
struct dentry *modem_reset_all_channels_test;
struct dentry *open_close_test;
struct dentry *open_close_dun_rmnet_test;
struct dentry *close_chan_lpm_test;
struct dentry *lpm_test_client_wakes_host_test;
struct dentry *lpm_test_host_wakes_client_test;
struct dentry *lpm_test_random_single_channel_test;
struct dentry *lpm_test_random_multi_channel_test;
};
struct test_context {
dev_t dev_num;
struct device *dev;
struct cdev *cdev;
int number_of_active_devices;
int max_number_of_devices;
struct sdio_test_device test_dev_arr[MAX_NUM_OF_SDIO_DEVICES];
struct test_channel *test_ch;
struct test_channel *test_ch_arr[SDIO_MAX_CHANNELS];
long testcase;
const char *name;
int exit_flag;
u32 signature;
int runtime_debug;
struct platform_device *smem_pdev;
struct sdio_smem_client *sdio_smem;
int smem_was_init;
u8 *smem_buf;
uint32_t smem_counter;
struct platform_device *csvt_app_pdev;
wait_queue_head_t wait_q;
int test_completed;
int test_result;
struct sdio_al_test_debug debug;
struct wake_lock wake_lock;
unsigned int lpm_pseudo_random_seed;
};
/* FORWARD DECLARATIONS */
static int set_params_loopback_9k(struct test_channel *tch);
static int set_params_smem_test(struct test_channel *tch);
static int set_params_a2_validation(struct test_channel *tch);
static int set_params_a2_perf(struct test_channel *tch);
static int set_params_8k_sender_no_lp(struct test_channel *tch);
static int set_params_a2_small_pkts(struct test_channel *tch);
static int set_params_rtt(struct test_channel *tch);
static int set_params_loopback_9k_close(struct test_channel *tch);
static int close_channel_lpm_test(int channel_num);
static int set_params_lpm_test(struct test_channel *tch,
enum sdio_test_case_type test,
int timer_interval_ms);
static void set_pseudo_random_seed(void);
static int set_params_modem_reset(struct test_channel *tch);
static int test_start(void);
static void rx_cleanup(struct test_channel *test_ch, int *rx_packet_count);
static void sdio_al_test_cleanup_channels(void);
static void notify(void *priv, unsigned channel_event);
#ifdef CONFIG_MSM_SDIO_SMEM
static int sdio_smem_open(struct sdio_smem_client *sdio_smem);
#endif
/*
* Seed for pseudo random time sleeping in Random LPM test.
* If not set, current time in jiffies is used.
*/
static unsigned int seed;
module_param(seed, int, 0);
static struct test_context *test_ctx;
static void sdio_al_test_initial_dev_and_chan(struct test_context *test_ctx)
{
int i = 0;
if (!test_ctx) {
pr_err(TEST_MODULE_NAME ":%s - test_ctx is NULL.\n", __func__);
return;
}
for (i = 0 ; i < MAX_NUM_OF_SDIO_DEVICES ; ++i)
test_ctx->test_dev_arr[i].sdio_al_device = NULL;
for (i = 0; i < SDIO_MAX_CHANNELS; i++) {
struct test_channel *tch = test_ctx->test_ch_arr[i];
if (!tch)
continue;
tch->is_used = 0;
}
sdio_al_test_cleanup_channels();
}
#ifdef CONFIG_DEBUG_FS
static int message_repeat;
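/*
 * Parse the decimal iteration count written to a debugfs test node.
 * Returns the parsed number, or a negative errno on failure.
 */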
static int sdio_al_test_extract_number(const char __user *buf,
size_t count)
{
int ret = 0;
int number = -1;
char local_buf[MAX_STR_SIZE] = {0};
char *start = NULL;
if (count >= MAX_STR_SIZE) {
pr_err(TEST_MODULE_NAME ": %s - count(%d) >= MAX_STR_SIZE(%d). "
"Please choose a smaller number\n",
__func__, (int)count, MAX_STR_SIZE);
return -EINVAL;
}
if (copy_from_user(local_buf, buf, count)) {
pr_err(TEST_MODULE_NAME ": %s - copy_from_user() failed\n",
__func__);
return -EINVAL;
}
/* adding null termination to the string */
local_buf[count] = '\0';
/* stripping leading and trailing white spaces */
start = strstrip(local_buf);
ret = kstrtoint(start, 10, &number);
if (ret) {
pr_err(TEST_MODULE_NAME " : %s - kstrtoint() failed\n",
__func__);
return ret;
}
return number;
}
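/*
 * Each open() re-arms message_repeat so the description text in the
 * read handlers below is returned exactly once per open; a plain
 * "cat" of a test node therefore terminates instead of re-reading
 * the same help text forever.
 */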
static int sdio_al_test_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
message_repeat = 1;
return 0;
}
static void sdio_al_test_cleanup_channels(void)
{
int channel_num;
int dummy = 0;
for (channel_num = 0 ; channel_num < SDIO_MAX_CHANNELS ;
++channel_num) {
if (channel_num == SDIO_SMEM)
continue;
rx_cleanup(test_ctx->test_ch_arr[channel_num], &dummy);
}
return;
}
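/*
 * All of the *_test_write handlers below follow the same pattern:
 * parse the requested number of iterations from the value written to
 * the debugfs node, then for each iteration reset the devices and
 * channels, configure the channels taking part in the test and call
 * test_start(). For example (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   echo 3 > /sys/kernel/debug/sdio_al_test/10_rpc_sender_test
 *
 * runs the RPC loopback test three times.
 */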
/* RPC SENDER TEST */
static ssize_t rpc_sender_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- RPC SENDER TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_loopback_9k(test_ctx->test_ch_arr[SDIO_RPC]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t rpc_sender_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nRPC_SENDER_TEST\n"
"===============\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations rpc_sender_test_ops = {
.open = sdio_al_test_open,
.write = rpc_sender_test_write,
.read = rpc_sender_test_read,
};
/* RPC, QMI & DIAG SENDER TEST */
static ssize_t rpc_qmi_diag_sender_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- RPC, QMI AND DIAG SENDER TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_loopback_9k(test_ctx->test_ch_arr[SDIO_RPC]);
set_params_loopback_9k(test_ctx->test_ch_arr[SDIO_QMI]);
set_params_loopback_9k(test_ctx->test_ch_arr[SDIO_DIAG]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t rpc_qmi_diag_sender_test_read(struct file *file,
char __user
*buffer, size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nRPC_QMI_DIAG_SENDER_TEST\n"
"========================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations rpc_qmi_diag_sender_test_ops = {
.open = sdio_al_test_open,
.write = rpc_qmi_diag_sender_test_write,
.read = rpc_qmi_diag_sender_test_read,
};
/* SMEM TEST */
static ssize_t smem_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- SMEM TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_smem_test(test_ctx->test_ch_arr[SDIO_SMEM]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t smem_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nSMEM_TEST\n"
"=========\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations smem_test_ops = {
.open = sdio_al_test_open,
.write = smem_test_write,
.read = smem_test_read,
};
/* SMEM & RPC TEST */
static ssize_t smem_rpc_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- SMEM AND RPC TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_loopback_9k(test_ctx->test_ch_arr[SDIO_RPC]);
set_params_smem_test(test_ctx->test_ch_arr[SDIO_SMEM]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t smem_rpc_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nSMEM_RPC_TEST\n"
"=============\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations smem_rpc_test_ops = {
.open = sdio_al_test_open,
.write = smem_rpc_test_write,
.read = smem_rpc_test_read,
};
/* RMNET A2 VALIDATION TEST */
static ssize_t rmnet_a2_validation_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- RMNET A2 VALIDATION TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_a2_validation(test_ctx->test_ch_arr[SDIO_RMNT]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t rmnet_a2_validation_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nRMNET_A2_VALIDATION_TEST\n"
"=========================\n"
"Description:\n"
"In this test, the HOST sends multiple packets to the\n"
"CLIENT and validates the packets loop backed from A2\n"
"for the RMNET channel.\n\n"
"END OF DESCRIPTION\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations rmnet_a2_validation_test_ops = {
.open = sdio_al_test_open,
.write = rmnet_a2_validation_test_write,
.read = rmnet_a2_validation_test_read,
};
/* DUN A2 VALIDATION TEST */
static ssize_t dun_a2_validation_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- DUN A2 VALIDATION TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_a2_validation(test_ctx->test_ch_arr[SDIO_DUN]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t dun_a2_validation_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nDUN_A2_VALIDATION_TEST\n"
"=========================\n"
"Description:\n"
"In this test, the HOST sends multiple packets to the\n"
"CLIENT and validates the packets loop backed from A2\n"
"for the DUN channel.\n\n"
"END OF DESCRIPTION\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations dun_a2_validation_test_ops = {
.open = sdio_al_test_open,
.write = dun_a2_validation_test_write,
.read = dun_a2_validation_test_read,
};
/* RMNET A2 PERFORMANCE TEST */
static ssize_t rmnet_a2_perf_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- RMNET A2 PERFORMANCE TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_a2_perf(test_ctx->test_ch_arr[SDIO_RMNT]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t rmnet_a2_perf_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nRMNET_A2_PERFORMANCE_TEST\n"
"=========================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations rmnet_a2_perf_test_ops = {
.open = sdio_al_test_open,
.write = rmnet_a2_perf_test_write,
.read = rmnet_a2_perf_test_read,
};
/* DUN A2 PERFORMANCE TEST */
static ssize_t dun_a2_perf_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- DUN A2 PERFORMANCE TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_a2_perf(test_ctx->test_ch_arr[SDIO_DUN]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t dun_a2_perf_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nDUN_A2_PERFORMANCE_TEST\n"
"=======================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations dun_a2_perf_test_ops = {
.open = sdio_al_test_open,
.write = dun_a2_perf_test_write,
.read = dun_a2_perf_test_read,
};
/* CSVT A2 PERFORMANCE TEST */
static ssize_t csvt_a2_perf_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- CSVT A2 PERFORMANCE TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_a2_perf(test_ctx->test_ch_arr[SDIO_CSVT]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t csvt_a2_perf_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nCSVT_A2_PERFORMANCE_TEST\n"
"========================\n"
"Description:\n"
"Loopback test on the CSVT Channel, in order to check "
"throughput performance.\n"
"Packet size that are sent on the CSVT channel in this "
"test is %d.bytes\n\n"
"END OF DESCRIPTION\n", CSVT_PACKET_SIZE);
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations csvt_a2_perf_test_ops = {
.open = sdio_al_test_open,
.write = csvt_a2_perf_test_write,
.read = csvt_a2_perf_test_read,
};
/* RMNET DUN A2 PERFORMANCE TEST */
static ssize_t rmnet_dun_a2_perf_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- RMNET AND DUN A2 PERFORMANCE TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_a2_perf(test_ctx->test_ch_arr[SDIO_RMNT]);
set_params_a2_perf(test_ctx->test_ch_arr[SDIO_DUN]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t rmnet_dun_a2_perf_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nRMNET_DUN_A2_PERFORMANCE_TEST\n"
"=============================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations rmnet_dun_a2_perf_test_ops = {
.open = sdio_al_test_open,
.write = rmnet_dun_a2_perf_test_write,
.read = rmnet_dun_a2_perf_test_read,
};
/* RPC SENDER & RMNET A2 PERFORMANCE TEST */
static ssize_t rpc_sender_rmnet_a2_perf_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "--RPC SENDER AND RMNET A2 "
"PERFORMANCE --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_loopback_9k(test_ctx->test_ch_arr[SDIO_RPC]);
set_params_a2_perf(test_ctx->test_ch_arr[SDIO_RMNT]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t rpc_sender_rmnet_a2_perf_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nRPC_SENDER_RMNET_A2_PERFORMANCE_TEST\n"
"====================================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations rpc_sender_rmnet_a2_perf_test_ops = {
.open = sdio_al_test_open,
.write = rpc_sender_rmnet_a2_perf_test_write,
.read = rpc_sender_rmnet_a2_perf_test_read,
};
/* ALL CHANNELS TEST */
static ssize_t all_channels_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- ALL THE CHANNELS TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_loopback_9k(test_ctx->test_ch_arr[SDIO_RPC]);
set_params_loopback_9k(test_ctx->test_ch_arr[SDIO_QMI]);
set_params_loopback_9k(test_ctx->test_ch_arr[SDIO_DIAG]);
set_params_a2_perf(test_ctx->test_ch_arr[SDIO_RMNT]);
set_params_a2_perf(test_ctx->test_ch_arr[SDIO_DUN]);
set_params_smem_test(test_ctx->test_ch_arr[SDIO_SMEM]);
set_params_a2_perf(test_ctx->test_ch_arr[SDIO_CSVT]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t all_channels_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nALL_CHANNELS_TEST\n"
"=================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations all_channels_test_ops = {
.open = sdio_al_test_open,
.write = all_channels_test_write,
.read = all_channels_test_read,
};
/* HOST SENDER NO LP DIAG TEST */
static ssize_t host_sender_no_lp_diag_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- HOST SENDER NO LP FOR DIAG TEST --");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_8k_sender_no_lp(test_ctx->test_ch_arr[SDIO_DIAG]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t host_sender_no_lp_diag_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nHOST_SENDER_NO_LP_DIAG_TEST\n"
"===========================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations host_sender_no_lp_diag_test_ops = {
.open = sdio_al_test_open,
.write = host_sender_no_lp_diag_test_write,
.read = host_sender_no_lp_diag_test_read,
};
/* HOST SENDER NO LP DIAG, RPC TEST */
static ssize_t host_sender_no_lp_diag_rpc_test_write(
struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- HOST SENDER NO LP FOR DIAG, RPC "
"TEST --");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_8k_sender_no_lp(test_ctx->test_ch_arr[SDIO_DIAG]);
set_params_8k_sender_no_lp(test_ctx->test_ch_arr[SDIO_RPC]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t host_sender_no_lp_diag_rpc_test_read(
struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nHOST_SENDER_NO_LP_DIAG_RPC_TEST\n"
"===================================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations host_sender_no_lp_diag_rpc_test_ops = {
.open = sdio_al_test_open,
.write = host_sender_no_lp_diag_rpc_test_write,
.read = host_sender_no_lp_diag_rpc_test_read,
};
/* RMNET SMALL PACKETS TEST */
static ssize_t rmnet_small_packets_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- RMNET SMALL PACKETS (5-128) TEST --");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_a2_small_pkts(test_ctx->test_ch_arr[SDIO_RMNT]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t rmnet_small_packets_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nRMNET_SMALL_PACKETS_TEST\n"
"========================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations rmnet_small_packets_test_ops = {
.open = sdio_al_test_open,
.write = rmnet_small_packets_test_write,
.read = rmnet_small_packets_test_read,
};
/* RMNET RTT TEST */
static ssize_t rmnet_rtt_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- RMNET RTT TEST --");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_rtt(test_ctx->test_ch_arr[SDIO_RMNT]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t rmnet_rtt_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nRMNET_RTT_TEST\n"
"==============\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations rmnet_rtt_test_ops = {
.open = sdio_al_test_open,
.write = rmnet_rtt_test_write,
.read = rmnet_rtt_test_read,
};
/* CSVT RTT TEST */
static ssize_t csvt_rtt_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- CSVT RTT TEST --");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_rtt(test_ctx->test_ch_arr[SDIO_CSVT]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t csvt_rtt_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nCSVT_RTT_TEST\n"
"==============\n"
"Description:\n"
"In this test the HOST send a message of %d bytes "
"to the CLIENT\n\n"
"END OF DESCRIPTION\n", SDIO_CSVT_RTT_PACKET_SIZE);
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations csvt_rtt_test_ops = {
.open = sdio_al_test_open,
.write = csvt_rtt_test_write,
.read = csvt_rtt_test_read,
};
/* MODEM RESET RPC TEST */
static ssize_t modem_reset_rpc_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- MODEM RESET - RPC CHANNEL TEST --");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_modem_reset(test_ctx->test_ch_arr[SDIO_RPC]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t modem_reset_rpc_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nMODEM_RESET_RPC_TEST\n"
"====================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations modem_reset_rpc_test_ops = {
.open = sdio_al_test_open,
.write = modem_reset_rpc_test_write,
.read = modem_reset_rpc_test_read,
};
/* MODEM RESET RMNET TEST */
static ssize_t modem_reset_rmnet_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- MODEM RESET - RMNT CHANNEL TEST --");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_modem_reset(test_ctx->test_ch_arr[SDIO_RMNT]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t modem_reset_rmnet_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nMODEM_RESET_RMNET_TEST\n"
"======================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations modem_reset_rmnet_test_ops = {
.open = sdio_al_test_open,
.write = modem_reset_rmnet_test_write,
.read = modem_reset_rmnet_test_read,
};
/* MODEM RESET - CHANNELS IN 4BIT DEVICE TEST */
static ssize_t modem_reset_channels_4bit_dev_test_write(
struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- MODEM RESET - ALL CHANNELS IN "
"4BIT DEVICE TEST --");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_modem_reset(test_ctx->test_ch_arr[SDIO_RPC]);
set_params_modem_reset(test_ctx->test_ch_arr[SDIO_QMI]);
set_params_modem_reset(test_ctx->test_ch_arr[SDIO_DIAG]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t modem_reset_channels_4bit_dev_test_read(
struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nMODEM_RESET_CHANNELS_4BIT_DEV_TEST\n"
"==================================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations modem_reset_channels_4bit_dev_test_ops = {
.open = sdio_al_test_open,
.write = modem_reset_channels_4bit_dev_test_write,
.read = modem_reset_channels_4bit_dev_test_read,
};
/* MODEM RESET - CHANNELS IN 8BIT DEVICE TEST */
static ssize_t modem_reset_channels_8bit_dev_test_write(
struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- MODEM RESET - ALL CHANNELS IN "
"8BIT DEVICE TEST --");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_modem_reset(test_ctx->test_ch_arr[SDIO_RMNT]);
set_params_modem_reset(test_ctx->test_ch_arr[SDIO_DUN]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t modem_reset_channels_8bit_dev_test_read(
struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nMODEM_RESET_CHANNELS_8BIT_DEV_TEST\n"
"==================================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations modem_reset_channels_8bit_dev_test_ops = {
.open = sdio_al_test_open,
.write = modem_reset_channels_8bit_dev_test_write,
.read = modem_reset_channels_8bit_dev_test_read,
};
/* MODEM RESET - ALL CHANNELS TEST */
static ssize_t modem_reset_all_channels_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- MODEM RESET - ALL CHANNELS TEST --");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_modem_reset(test_ctx->test_ch_arr[SDIO_RPC]);
set_params_modem_reset(test_ctx->test_ch_arr[SDIO_QMI]);
set_params_modem_reset(test_ctx->test_ch_arr[SDIO_DIAG]);
set_params_modem_reset(test_ctx->test_ch_arr[SDIO_RMNT]);
set_params_modem_reset(test_ctx->test_ch_arr[SDIO_DUN]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t modem_reset_all_channels_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nMODEM_RESET_ALL_CHANNELS_TEST\n"
"=============================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations modem_reset_all_channels_test_ops = {
.open = sdio_al_test_open,
.write = modem_reset_all_channels_test_write,
.read = modem_reset_all_channels_test_read,
};
/* HOST SENDER WITH OPEN/CLOSE TEST */
static ssize_t open_close_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
struct test_channel **ch_arr = test_ctx->test_ch_arr;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- HOST SENDER WITH OPEN/CLOSE TEST --");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_loopback_9k_close(ch_arr[SDIO_DIAG]);
set_params_loopback_9k_close(ch_arr[SDIO_RPC]);
set_params_loopback_9k_close(ch_arr[SDIO_SMEM]);
set_params_loopback_9k_close(ch_arr[SDIO_QMI]);
set_params_loopback_9k_close(ch_arr[SDIO_RMNT]);
set_params_loopback_9k_close(ch_arr[SDIO_DUN]);
set_params_loopback_9k_close(ch_arr[SDIO_CSVT]);
ret = test_start();
if (ret)
break;
pr_info(TEST_MODULE_NAME " -- correctness test for"
"DIAG ");
set_params_loopback_9k(ch_arr[SDIO_DIAG]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t open_close_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nOPEN_CLOSE_TEST\n"
"============================\n"
"Description:\n"
"In this test the host sends 5k packets to the modem in the "
"following sequence: Send a random burst of packets on "
"Diag and Rmnet channels, read 0 or a random number "
"of packets, close and re-open the channel. At the end of the "
"test, the channel is verified by running a loopback test\n\n"
"END OF DESCRIPTION\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations open_close_test_ops = {
.open = sdio_al_test_open,
.write = open_close_test_write,
.read = open_close_test_read,
};
/* HOST SENDER WITH OPEN/CLOSE FOR DUN & RMNET TEST */
static ssize_t open_close_dun_rmnet_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
struct test_channel **ch_arr = test_ctx->test_ch_arr;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- HOST SENDER WITH OPEN/CLOSE FOR "
"DUN AND RMNET TEST --");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_loopback_9k_close(ch_arr[SDIO_DUN]);
set_params_loopback_9k_close(ch_arr[SDIO_RMNT]);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t open_close_dun_rmnet_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nOPEN_CLOSE_DUN_RMNET_TEST\n"
"============================\n"
"Description:\n"
"In this test the host sends 5k packets to the modem in the "
"following sequence: Send a random burst of packets on "
"DUN and Rmnet channels, read 0 or a random number "
"of packets, close and re-open the channel.\n\n"
"END OF DESCRIPTION\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations open_close_dun_rmnet_test_ops = {
.open = sdio_al_test_open,
.write = open_close_dun_rmnet_test_write,
.read = open_close_dun_rmnet_test_read,
};
/* CLOSE CHANNEL & LPM TEST HOST WAKES THE CLIENT TEST */
static ssize_t close_chan_lpm_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int channel_num = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- CLOSE CHANNEL & LPM TEST "
"HOST WAKES THE CLIENT TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
for (channel_num = 0 ; channel_num < SDIO_MAX_CHANNELS ;
channel_num++) {
ret = close_channel_lpm_test(channel_num);
if (ret)
break;
set_params_lpm_test(test_ctx->test_ch_arr[SDIO_RPC],
SDIO_TEST_LPM_HOST_WAKER, 120);
ret = test_start();
if (ret)
break;
}
if (ret) {
pr_err(TEST_MODULE_NAME " -- Close channel & LPM Test "
"FAILED: %d --\n", ret);
} else {
pr_err(TEST_MODULE_NAME " -- Close channel & LPM Test "
"PASSED\n");
}
}
return count;
}
static ssize_t close_chan_lpm_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nCLOSE_CHAN_LPM_TEST\n"
"===================\n"
"Description:\n"
"TBD\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations close_chan_lpm_test_ops = {
.open = sdio_al_test_open,
.write = close_chan_lpm_test_write,
.read = close_chan_lpm_test_read,
};
/* LPM TEST FOR DEVICE 1. CLIENT WAKES THE HOST TEST */
static ssize_t lpm_test_client_wakes_host_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- LPM TEST FOR DEVICE 1. CLIENT "
"WAKES THE HOST TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_lpm_test(test_ctx->test_ch_arr[SDIO_RPC],
SDIO_TEST_LPM_CLIENT_WAKER, 90);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t lpm_test_client_wakes_host_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nLPM_TEST_CLIENT_WAKES_HOST_TEST\n"
"===============================\n"
"Description:\n"
"In this test, the HOST is going into LPM mode,\n"
"and the CLIENT is responsible to send it a message\n"
"in order to wake it up\n\n"
"END OF DESCRIPTION\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations lpm_test_client_wakes_host_test_ops = {
.open = sdio_al_test_open,
.write = lpm_test_client_wakes_host_test_write,
.read = lpm_test_client_wakes_host_test_read,
};
/* LPM TEST FOR DEVICE 1. HOST WAKES THE CLIENT TEST */
static ssize_t lpm_test_host_wakes_client_test_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- LPM TEST FOR DEVICE 1. HOST "
"WAKES THE CLIENT TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_params_lpm_test(test_ctx->test_ch_arr[SDIO_RPC],
SDIO_TEST_LPM_HOST_WAKER, 120);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t lpm_test_host_wakes_client_test_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nLPM_TEST_HOST_WAKES_CLIENT_TEST\n"
"===============================\n"
"Description:\n"
"In this test, the CLIENT goes into LPM mode, and the\n"
"HOST is responsible to send it a message\n"
"in order to wake it up\n\n"
"END OF DESCRIPTION\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations lpm_test_host_wakes_client_test_ops = {
.open = sdio_al_test_open,
.write = lpm_test_host_wakes_client_test_write,
.read = lpm_test_host_wakes_client_test_read,
};
/* LPM TEST RANDOM, SINGLE CHANNEL TEST */
static ssize_t lpm_test_random_single_channel_test_write(
struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- LPM TEST RANDOM SINGLE "
"CHANNEL TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_pseudo_random_seed();
set_params_lpm_test(test_ctx->test_ch_arr[SDIO_RPC],
SDIO_TEST_LPM_RANDOM, 0);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t lpm_test_random_single_channel_test_read(
struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nLPM_TEST_RANDOM_SINGLE_CHANNEL_TEST\n"
"===================================\n"
"Description:\n"
"In this test, the HOST and CLIENT "
"send messages to each other,\n"
"random in time, over RPC channel only.\n"
"All events are being recorded, and later on,\n"
"they are being analysed by the HOST and by the CLIENT\n,"
"in order to check if the LPM mechanism worked properly,\n"
"meaning:"
" When all the relevant conditions are met, a device should:\n"
"1. Go to sleep\n"
"2. Wake up\n"
"3. Stay awake\n\n"
"END OF DESCRIPTION\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations lpm_test_random_single_channel_test_ops = {
.open = sdio_al_test_open,
.write = lpm_test_random_single_channel_test_write,
.read = lpm_test_random_single_channel_test_read,
};
/* LPM TEST RANDOM, MULTI CHANNEL TEST */
static ssize_t lpm_test_random_multi_channel_test_write(
struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int ret = 0;
int i = 0;
int number = -1;
pr_info(TEST_MODULE_NAME "-- LPM TEST RANDOM MULTI CHANNEL TEST --\n");
number = sdio_al_test_extract_number(buf, count);
if (number < 0) {
pr_err(TEST_MODULE_NAME " : %s - sdio_al_test_extract_number() "
"failed. number = %d\n", __func__, number);
return count;
}
for (i = 0 ; i < number ; ++i) {
pr_info(TEST_MODULE_NAME " - Cycle # %d / %d\n", i+1, number);
pr_info(TEST_MODULE_NAME " ===================");
sdio_al_test_initial_dev_and_chan(test_ctx);
set_pseudo_random_seed();
set_params_lpm_test(test_ctx->test_ch_arr[SDIO_RPC],
SDIO_TEST_LPM_RANDOM, 0);
set_params_lpm_test(test_ctx->test_ch_arr[SDIO_DIAG],
SDIO_TEST_LPM_RANDOM, 0);
set_params_lpm_test(test_ctx->test_ch_arr[SDIO_QMI],
SDIO_TEST_LPM_RANDOM, 0);
ret = test_start();
if (ret)
break;
}
return count;
}
static ssize_t lpm_test_random_multi_channel_test_read(
struct file *file,
char __user *buffer,
size_t count,
loff_t *offset)
{
memset((void *)buffer, 0, count);
snprintf(buffer, count,
"\nLPM_TEST_RANDOM_MULTI_CHANNEL_TEST\n"
"==================================\n"
"Description:\n"
"In this test, the HOST and CLIENT "
"send messages to each other,\n"
"random in time, over RPC, QMI AND DIAG channels\n"
"(i.e, on both SDIO devices).\n"
"All events are being recorded, and later on,\n"
"they are being analysed by the HOST and by the CLIENT,\n"
"in order to check if the LPM mechanism worked properly,\n"
"meaning:"
" When all the relevant conditions are met, a device should:\n"
"1. Go to sleep\n"
"2. Wake up\n"
"3. Stay awake\n\n"
"END OF DESCRIPTION\n");
if (message_repeat == 1) {
message_repeat = 0;
return strnlen(buffer, count);
} else {
return 0;
}
}
const struct file_operations lpm_test_random_multi_channel_test_ops = {
.open = sdio_al_test_open,
.write = lpm_test_random_multi_channel_test_write,
.read = lpm_test_random_multi_channel_test_read,
};
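/*
 * Debugfs usage (illustrative sketch, assuming debugfs is mounted at the
 * usual /sys/kernel/debug):
 *
 *   echo 1 > /sys/kernel/debug/sdio_al_test/620_lpm_test_random_single_channel_test
 *   cat /sys/kernel/debug/sdio_al_test/620_lpm_test_random_single_channel_test
 *
 * The value written is parsed by sdio_al_test_extract_number() and is the
 * number of test cycles to run; reading a node prints the test description
 * once (message_repeat is cleared after the first read).
 */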
static int sdio_al_test_debugfs_init(void)
{
test_ctx->debug.debug_root = debugfs_create_dir("sdio_al_test",
NULL);
if (!test_ctx->debug.debug_root)
return -ENOENT;
test_ctx->debug.debug_test_result = debugfs_create_u32(
"test_result",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
&test_ctx->test_result);
test_ctx->debug.debug_dun_throughput = debugfs_create_u32(
"dun_throughput",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
&test_ctx->debug.dun_throughput);
test_ctx->debug.debug_rmnt_throughput = debugfs_create_u32(
"rmnt_throughput",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
&test_ctx->debug.rmnt_throughput);
test_ctx->debug.rpc_sender_test =
debugfs_create_file("10_rpc_sender_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&rpc_sender_test_ops);
test_ctx->debug.rpc_qmi_diag_sender_test =
debugfs_create_file("20_rpc_qmi_diag_sender_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&rpc_qmi_diag_sender_test_ops);
test_ctx->debug.rmnet_a2_validation_test =
debugfs_create_file("30_rmnet_a2_validation_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&rmnet_a2_validation_test_ops);
test_ctx->debug.dun_a2_validation_test =
debugfs_create_file("40_dun_a2_validation_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&dun_a2_validation_test_ops);
test_ctx->debug.rmnet_a2_perf_test =
debugfs_create_file("50_rmnet_a2_perf_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&rmnet_a2_perf_test_ops);
test_ctx->debug.dun_a2_perf_test =
debugfs_create_file("60_dun_a2_perf_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&dun_a2_perf_test_ops);
test_ctx->debug.csvt_a2_perf_test =
debugfs_create_file("71_csvt_a2_perf_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&csvt_a2_perf_test_ops);
test_ctx->debug.rmnet_dun_a2_perf_test =
debugfs_create_file("70_rmnet_dun_a2_perf_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&rmnet_dun_a2_perf_test_ops);
test_ctx->debug.rpc_sender_rmnet_a2_perf_test =
debugfs_create_file("80_rpc_sender_rmnet_a2_perf_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&rpc_sender_rmnet_a2_perf_test_ops);
test_ctx->debug.smem_test =
debugfs_create_file("90_smem_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&smem_test_ops);
test_ctx->debug.smem_rpc_test =
debugfs_create_file("100_smem_rpc_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&smem_rpc_test_ops);
test_ctx->debug.all_channels_test =
debugfs_create_file("150_all_channels_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&all_channels_test_ops);
test_ctx->debug.host_sender_no_lp_diag_test =
debugfs_create_file("160_host_sender_no_lp_diag_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&host_sender_no_lp_diag_test_ops);
test_ctx->debug.host_sender_no_lp_diag_rpc_test =
debugfs_create_file("170_host_sender_no_lp_diag_rpc_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&host_sender_no_lp_diag_rpc_test_ops);
test_ctx->debug.rmnet_small_packets_test =
debugfs_create_file("180_rmnet_small_packets_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&rmnet_small_packets_test_ops);
test_ctx->debug.rmnet_rtt_test =
debugfs_create_file("190_rmnet_rtt_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&rmnet_rtt_test_ops);
test_ctx->debug.csvt_rtt_test =
debugfs_create_file("191_csvt_rtt_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&csvt_rtt_test_ops);
test_ctx->debug.modem_reset_rpc_test =
debugfs_create_file("220_modem_reset_rpc_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&modem_reset_rpc_test_ops);
test_ctx->debug.modem_reset_rmnet_test =
debugfs_create_file("230_modem_reset_rmnet_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&modem_reset_rmnet_test_ops);
test_ctx->debug.modem_reset_channels_4bit_dev_test =
debugfs_create_file("240_modem_reset_channels_4bit_dev_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&modem_reset_channels_4bit_dev_test_ops);
test_ctx->debug.modem_reset_channels_8bit_dev_test =
debugfs_create_file("250_modem_reset_channels_8bit_dev_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&modem_reset_channels_8bit_dev_test_ops);
test_ctx->debug.modem_reset_all_channels_test =
debugfs_create_file("260_modem_reset_all_channels_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&modem_reset_all_channels_test_ops);
test_ctx->debug.open_close_test =
debugfs_create_file("270_open_close_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&open_close_test_ops);
test_ctx->debug.open_close_dun_rmnet_test =
debugfs_create_file("271_open_close_dun_rmnet_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&open_close_dun_rmnet_test_ops);
test_ctx->debug.close_chan_lpm_test =
debugfs_create_file("280_close_chan_lpm_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&close_chan_lpm_test_ops);
test_ctx->debug.lpm_test_client_wakes_host_test =
debugfs_create_file("600_lpm_test_client_wakes_host_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&lpm_test_client_wakes_host_test_ops);
test_ctx->debug.lpm_test_host_wakes_client_test =
debugfs_create_file("610_lpm_test_host_wakes_client_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&lpm_test_host_wakes_client_test_ops);
test_ctx->debug.lpm_test_random_single_channel_test =
debugfs_create_file("620_lpm_test_random_single_channel_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&lpm_test_random_single_channel_test_ops);
test_ctx->debug.lpm_test_random_multi_channel_test =
debugfs_create_file("630_lpm_test_random_multi_channel_test",
S_IRUGO | S_IWUGO,
test_ctx->debug.debug_root,
NULL,
&lpm_test_random_multi_channel_test_ops);
if ((!test_ctx->debug.debug_dun_throughput) &&
(!test_ctx->debug.debug_rmnt_throughput)) {
debugfs_remove_recursive(test_ctx->debug.debug_root);
test_ctx->debug.debug_root = NULL;
return -ENOENT;
}
return 0;
}
static void sdio_al_test_debugfs_cleanup(void)
{
debugfs_remove(test_ctx->debug.debug_dun_throughput);
debugfs_remove(test_ctx->debug.debug_rmnt_throughput);
debugfs_remove(test_ctx->debug.debug_root);
}
#endif
static int channel_name_to_id(char *name)
{
pr_info(TEST_MODULE_NAME "%s: channel name %s\n",
__func__, name);
if (!strncmp(name, "SDIO_RPC_TEST",
strnlen("SDIO_RPC_TEST", CHANNEL_NAME_SIZE)))
return SDIO_RPC;
else if (!strncmp(name, "SDIO_QMI_TEST",
strnlen("SDIO_QMI_TEST", TEST_CH_NAME_SIZE)))
return SDIO_QMI;
else if (!strncmp(name, "SDIO_RMNT_TEST",
strnlen("SDIO_RMNT_TEST", TEST_CH_NAME_SIZE)))
return SDIO_RMNT;
else if (!strncmp(name, "SDIO_DIAG_TEST",
strnlen("SDIO_DIAG", TEST_CH_NAME_SIZE)))
return SDIO_DIAG;
else if (!strncmp(name, "SDIO_DUN_TEST",
strnlen("SDIO_DUN_TEST", TEST_CH_NAME_SIZE)))
return SDIO_DUN;
else if (!strncmp(name, "SDIO_SMEM_TEST",
strnlen("SDIO_SMEM_TEST", TEST_CH_NAME_SIZE)))
return SDIO_SMEM;
else if (!strncmp(name, "SDIO_CSVT_TEST",
strnlen("SDIO_CSVT_TEST", TEST_CH_NAME_SIZE)))
return SDIO_CSVT;
else
return SDIO_MAX_CHANNELS;
}
/**
* Allocate and add SDIO_SMEM platform device
*/
#ifdef CONFIG_MSM_SDIO_SMEM
static int add_sdio_smem(void)
{
int ret = 0;
test_ctx->smem_pdev = platform_device_alloc("SDIO_SMEM", -1);
if (!test_ctx->smem_pdev) {
pr_err(TEST_MODULE_NAME ": platform_device_alloc failed\n");
return -ENOMEM;
}
ret = platform_device_add(test_ctx->smem_pdev);
if (ret) {
pr_err(TEST_MODULE_NAME ": platform_device_add failed, "
"ret=%d\n", ret);
return ret;
}
return 0;
}
#endif
static int open_sdio_ch(struct test_channel *tch)
{
int ret = 0;
if (!tch) {
pr_err(TEST_MODULE_NAME ": %s NULL tch\n", __func__);
return -EINVAL;
}
if (!tch->ch_ready) {
TEST_DBG(TEST_MODULE_NAME ":openning channel %s\n",
tch->name);
if (tch->ch_id == SDIO_SMEM) {
#ifdef CONFIG_MSM_SDIO_SMEM
if (!test_ctx->smem_pdev)
ret = add_sdio_smem();
else
ret = sdio_smem_open(test_ctx->sdio_smem);
if (ret) {
pr_err(TEST_MODULE_NAME
":openning channel %s failed\n",
tch->name);
tch->ch_ready = false;
return -EINVAL;
}
#endif
} else {
tch->ch_ready = true;
ret = sdio_open(tch->name, &tch->ch, tch,
notify);
if (ret) {
pr_err(TEST_MODULE_NAME
":opening channel %s failed\n",
tch->name);
tch->ch_ready = false;
return -EINVAL;
}
}
}
return ret;
}
static int close_sdio_ch(struct test_channel *tch)
{
int ret = 0;
if (!tch) {
pr_err(TEST_MODULE_NAME ": %s NULL tch\n", __func__);
return -EINVAL;
}
if (tch->ch_id == SDIO_SMEM) {
#ifdef CONFIG_MSM_SDIO_SMEM
TEST_DBG(TEST_MODULE_NAME":%s closing channel %s",
__func__, tch->name);
ret = sdio_smem_unregister_client();
test_ctx->smem_counter = 0;
#endif
} else {
ret = sdio_close(tch->ch);
}
if (ret) {
pr_err(TEST_MODULE_NAME":%s close channel %s"
" failed\n", __func__, tch->name);
} else {
TEST_DBG(TEST_MODULE_NAME":%s close channel %s"
" success\n", __func__, tch->name);
tch->ch_ready = false;
}
return ret;
}
/**
* Config message
*/
static void send_config_msg(struct test_channel *test_ch)
{
int ret = 0 ;
u32 write_avail = 0;
int size = sizeof(test_ch->config_msg);
pr_debug(TEST_MODULE_NAME "%s\n", __func__);
memcpy(test_ch->buf, (void *)&test_ch->config_msg, size);
if (test_ctx->exit_flag) {
pr_info(TEST_MODULE_NAME ":Exit Test.\n");
return;
}
pr_info(TEST_MODULE_NAME ":Sending the config message.\n");
/* wait for data ready event */
write_avail = sdio_write_avail(test_ch->ch);
pr_debug(TEST_MODULE_NAME ":write_avail=%d\n", write_avail);
if (write_avail < size) {
wait_event(test_ch->wait_q,
atomic_read(&test_ch->tx_notify_count));
atomic_dec(&test_ch->tx_notify_count);
}
write_avail = sdio_write_avail(test_ch->ch);
pr_debug(TEST_MODULE_NAME ":write_avail=%d\n", write_avail);
if (write_avail < size) {
pr_info(TEST_MODULE_NAME ":not enough write avail.\n");
return;
}
ret = sdio_write(test_ch->ch, test_ch->buf, size);
if (ret)
pr_err(TEST_MODULE_NAME ":%s sdio_write err=%d.\n",
__func__, -ret);
else
pr_info(TEST_MODULE_NAME ":%s sent config_msg successfully.\n",
__func__);
}
/**
* Loopback Test
*/
static void loopback_test(struct test_channel *test_ch)
{
int ret = 0 ;
u32 read_avail = 0;
u32 write_avail = 0;
while (1) {
if (test_ctx->exit_flag) {
pr_info(TEST_MODULE_NAME ":Exit Test.\n");
return;
}
TEST_DBG(TEST_MODULE_NAME "--LOOPBACK WAIT FOR EVENT--.\n");
/* wait for data ready event */
wait_event(test_ch->wait_q,
atomic_read(&test_ch->rx_notify_count));
atomic_dec(&test_ch->rx_notify_count);
read_avail = sdio_read_avail(test_ch->ch);
if (read_avail == 0)
continue;
write_avail = sdio_write_avail(test_ch->ch);
if (write_avail < read_avail) {
pr_info(TEST_MODULE_NAME
":not enough write avail.\n");
continue;
}
ret = sdio_read(test_ch->ch, test_ch->buf, read_avail);
if (ret) {
pr_info(TEST_MODULE_NAME
":worker, sdio_read err=%d.\n", -ret);
continue;
}
test_ch->rx_bytes += read_avail;
TEST_DBG(TEST_MODULE_NAME ":worker total rx bytes = 0x%x.\n",
test_ch->rx_bytes);
ret = sdio_write(test_ch->ch,
test_ch->buf, read_avail);
if (ret) {
pr_info(TEST_MODULE_NAME
":loopback sdio_write err=%d.\n",
-ret);
continue;
}
test_ch->tx_bytes += read_avail;
TEST_DBG(TEST_MODULE_NAME
":loopback total tx bytes = 0x%x.\n",
test_ch->tx_bytes);
} /* end of while */
}
/**
* Check if all tests completed
*/
static void check_test_completion(void)
{
int i;
for (i = 0; i < SDIO_MAX_CHANNELS; i++) {
struct test_channel *tch = test_ctx->test_ch_arr[i];
if ((!tch) || (!tch->is_used) || (!tch->ch_ready))
continue;
if (!tch->test_completed) {
pr_info(TEST_MODULE_NAME ": %s - Channel %s test is "
"not completed", __func__, tch->name);
return;
}
}
pr_info(TEST_MODULE_NAME ": %s - Test is completed", __func__);
test_ctx->test_completed = 1;
wake_up(&test_ctx->wait_q);
}
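/*
 * Simple linear-congruential style generator used by the LPM tests:
 * the seed is advanced as seed = seed * 1103515367 + 35757, and the
 * returned value is (seed / 65536) % 500, i.e. a pseudo-random number
 * in the range [0..499].
 */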
static int pseudo_random_seed(unsigned int *seed_number)
{
if (!seed_number)
return 0;
*seed_number = (unsigned int)(((unsigned long)*seed_number *
(unsigned long)1103515367) + 35757);
return (int)((*seed_number / (64*1024)) % 500);
}
/* this function must be locked before accessing it */
static void lpm_test_update_entry(struct test_channel *tch,
enum lpm_test_msg_type msg_type,
char *msg_name,
int counter)
{
u32 index = 0;
static int print_full = 1;
struct sdio_test_device *test_device;
if (!tch) {
pr_err(TEST_MODULE_NAME ": %s - NULL test channel\n", __func__);
return;
}
test_device = tch->test_device;
if (!test_device) {
pr_err(TEST_MODULE_NAME ": %s - NULL test device\n", __func__);
return;
}
if (!test_device->lpm_arr) {
pr_err(TEST_MODULE_NAME ": %s - NULL lpm_arr\n", __func__);
return;
}
if (test_device->next_avail_entry_in_array >=
test_device->array_size) {
pr_err(TEST_MODULE_NAME ": %s - lpm array is full",
__func__);
if (print_full) {
print_hex_dump(KERN_INFO, TEST_MODULE_NAME ": lpm_arr:",
0, 32, 2,
(void *)test_device->lpm_arr,
sizeof(test_device->lpm_arr), false);
print_full = 0;
}
return;
}
index = test_device->next_avail_entry_in_array;
if ((msg_type == LPM_MSG_SEND) || (msg_type == LPM_MSG_REC))
test_device->lpm_arr[index].counter = counter;
else
test_device->lpm_arr[index].counter = 0;
test_device->lpm_arr[index].msg_type = msg_type;
memcpy(test_device->lpm_arr[index].msg_name, msg_name,
LPM_MSG_NAME_SIZE);
test_device->lpm_arr[index].current_ms =
jiffies_to_msecs(get_jiffies_64());
test_device->lpm_arr[index].read_avail_mask =
test_device->read_avail_mask;
if ((msg_type == LPM_SLEEP) || (msg_type == LPM_WAKEUP))
memcpy(test_device->lpm_arr[index].chan_name, "DEVICE ",
CHANNEL_NAME_SIZE);
else
memcpy(test_device->lpm_arr[index].chan_name, tch->name,
CHANNEL_NAME_SIZE);
test_device->next_avail_entry_in_array++;
}
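/*
 * Blocks until a message starting with TEST_CONFIG_SIGNATURE arrives on
 * the channel and returns the result word that follows it (buf[1]);
 * returns 0 on a read error.
 */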
static int wait_for_result_msg(struct test_channel *test_ch)
{
u32 read_avail = 0;
int ret = 0;
pr_info(TEST_MODULE_NAME ": %s - START, channel %s\n",
__func__, test_ch->name);
while (1) {
read_avail = sdio_read_avail(test_ch->ch);
if (read_avail == 0) {
pr_info(TEST_MODULE_NAME
": read_avail is 0 for chan %s\n",
test_ch->name);
wait_event(test_ch->wait_q,
atomic_read(&test_ch->rx_notify_count));
atomic_dec(&test_ch->rx_notify_count);
continue;
}
memset(test_ch->buf, 0x00, test_ch->buf_size);
ret = sdio_read(test_ch->ch, test_ch->buf, read_avail);
if (ret) {
pr_info(TEST_MODULE_NAME ": sdio_read for chan"
"%s failed, err=%d.\n",
test_ch->name, -ret);
goto exit_err;
}
if (test_ch->buf[0] != TEST_CONFIG_SIGNATURE) {
pr_info(TEST_MODULE_NAME ": Not a test_result "
"signature. expected 0x%x. received 0x%x "
"for chan %s\n",
TEST_CONFIG_SIGNATURE,
test_ch->buf[0],
test_ch->name);
continue;
} else {
pr_info(TEST_MODULE_NAME ": Signature is "
"TEST_CONFIG_SIGNATURE as expected for"
"channel %s\n", test_ch->name);
break;
}
}
return test_ch->buf[1];
exit_err:
return 0;
}
static void print_random_lpm_test_array(struct sdio_test_device *test_dev)
{
int i;
if (!test_dev) {
pr_err(TEST_MODULE_NAME ": %s - NULL test device\n", __func__);
return;
}
for (i = 0 ; i < test_dev->next_avail_entry_in_array ; ++i) {
if (i == 0)
pr_err(TEST_MODULE_NAME ": index %4d, chan=%2s, "
"code=%1d=%4s, msg#%1d, ms from before=-1, "
"read_mask=0x%d, ms=%2u",
i,
test_dev->lpm_arr[i].chan_name,
test_dev->lpm_arr[i].msg_type,
test_dev->lpm_arr[i].msg_name,
test_dev->lpm_arr[i].counter,
test_dev->lpm_arr[i].read_avail_mask,
test_dev->lpm_arr[i].current_ms);
else
pr_err(TEST_MODULE_NAME ": index "
"%4d, %2s, code=%1d=%4s, msg#%1d, ms from "
"before=%2u, read_mask=0x%d, ms=%2u",
i,
test_dev->lpm_arr[i].chan_name,
test_dev->lpm_arr[i].msg_type,
test_dev->lpm_arr[i].msg_name,
test_dev->lpm_arr[i].counter,
test_dev->lpm_arr[i].current_ms -
test_dev->lpm_arr[i-1].current_ms,
test_dev->lpm_arr[i].read_avail_mask,
test_dev->lpm_arr[i].current_ms);
udelay(1000);
}
}
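/*
 * Validates the recorded LPM event array on the host side: for every
 * SEND/RECEIVE entry it examines the time gap until the next message
 * entry. If the gap is shorter than 30 msec, no sleep/wakeup activity is
 * allowed in between; if the gap is longer than 90 msec and no channel
 * had pending rx data (read_avail_mask == 0), exactly one sleep/wakeup
 * pair is expected. Returns 0 on success, -ENODEV on any violation.
 */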
static int check_random_lpm_test_array(struct sdio_test_device *test_dev)
{
int i = 0, j = 0;
unsigned int delta_ms = 0;
int arr_ind = 0;
int ret = 0;
int notify_counter = 0;
int sleep_counter = 0;
int wakeup_counter = 0;
int lpm_activity_counter = 0;
if (!test_dev) {
pr_err(TEST_MODULE_NAME ": %s - NULL test device\n", __func__);
return -ENODEV;
}
for (i = 0; i < test_dev->next_avail_entry_in_array; i++) {
notify_counter = 0;
sleep_counter = 0;
wakeup_counter = 0;
if ((test_dev->lpm_arr[i].msg_type == LPM_MSG_SEND) ||
(test_dev->lpm_arr[i].msg_type == LPM_MSG_REC)) {
/* find the next message in the array */
arr_ind = test_dev->next_avail_entry_in_array;
for (j = i+1; j < arr_ind; j++) {
if ((test_dev->lpm_arr[j].msg_type ==
LPM_MSG_SEND) ||
(test_dev->lpm_arr[j].msg_type ==
LPM_MSG_REC) ||
(test_dev->lpm_arr[j].msg_type ==
LPM_NOTIFY))
break;
if (test_dev->lpm_arr[j].msg_type ==
LPM_SLEEP)
sleep_counter++;
if (test_dev->lpm_arr[j].msg_type ==
LPM_WAKEUP)
wakeup_counter++;
}
if (j == arr_ind) {
ret = 0;
break;
}
delta_ms = test_dev->lpm_arr[j].current_ms -
test_dev->lpm_arr[i].current_ms;
if (delta_ms < 30) {
if ((sleep_counter == 0)
&& (wakeup_counter == 0)) {
continue;
} else {
pr_err(TEST_MODULE_NAME "%s: lpm "
"activity while delta is less "
"than 30, i=%d, j=%d, "
"sleep_counter=%d, "
"wakeup_counter=%d",
__func__, i, j,
sleep_counter, wakeup_counter);
ret = -ENODEV;
break;
}
} else {
if ((delta_ms > 90) &&
(test_dev->lpm_arr[i].
read_avail_mask == 0)) {
if (j != i+3) {
pr_err(TEST_MODULE_NAME
"%s: unexpected "
"lpm activity "
"while delta is "
"bigger than "
"90, i=%d, "
"j=%d, "
"notify_counter"
"=%d",
__func__, i, j,
notify_counter);
ret = -ENODEV;
break;
}
lpm_activity_counter++;
}
}
}
}
pr_info(TEST_MODULE_NAME ": %s - lpm_activity_counter=%d",
__func__, lpm_activity_counter);
return ret;
}
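/*
 * Reader thread of the random LPM test: receives config_msg.num_packets
 * messages from the modem, records each one in the shared lpm_arr, waits
 * for the modem result message and, once this is the last open channel
 * on the device, evaluates the host-side array and reports PASS/FAIL.
 */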
static int lpm_test_main_task(void *ptr)
{
u32 read_avail = 0;
int last_msg_index = 0;
struct test_channel *test_ch = (struct test_channel *)ptr;
struct sdio_test_device *test_dev;
struct lpm_msg lpm_msg;
int ret = 0;
int host_result = 0;
if (!test_ch) {
pr_err(TEST_MODULE_NAME ": %s - NULL channel\n", __func__);
return -ENODEV;
}
pr_err(TEST_MODULE_NAME ": %s - STARTED. channel %s\n",
__func__, test_ch->name);
test_dev = test_ch->test_device;
if (!test_dev) {
pr_err(TEST_MODULE_NAME ": %s - NULL Test Device\n", __func__);
return -ENODEV;
}
while (last_msg_index < test_ch->config_msg.num_packets - 1) {
TEST_DBG(TEST_MODULE_NAME ": %s - "
"IN LOOP last_msg_index=%d\n",
__func__, last_msg_index);
read_avail = sdio_read_avail(test_ch->ch);
if (read_avail == 0) {
TEST_DBG(TEST_MODULE_NAME
":read_avail 0 for chan %s, "
"wait for event\n",
test_ch->name);
wait_event(test_ch->wait_q,
atomic_read(&test_ch->rx_notify_count));
atomic_dec(&test_ch->rx_notify_count);
read_avail = sdio_read_avail(test_ch->ch);
if (read_avail == 0) {
pr_err(TEST_MODULE_NAME
":read_avail size %d for chan %s not as"
" expected\n",
read_avail, test_ch->name);
continue;
}
}
memset(test_ch->buf, 0x00, test_ch->buf_size);
ret = sdio_read(test_ch->ch, test_ch->buf, read_avail);
if (ret) {
pr_info(TEST_MODULE_NAME ":sdio_read for chan %s"
" err=%d.\n", test_ch->name, -ret);
goto exit_err;
}
memcpy((void *)&lpm_msg, test_ch->buf, sizeof(lpm_msg));
/*
* when reading from channel, we want to turn off the bit
* mask that implies that there is pending data on that channel
*/
if (test_ch->test_device != NULL) {
spin_lock_irqsave(&test_dev->lpm_array_lock,
test_dev->lpm_array_lock_flags);
test_ch->notify_counter_per_chan--;
/*
* if the channel has no pending data, turn off the
* pending data bit mask of the channel
*/
if (test_ch->notify_counter_per_chan == 0) {
test_ch->test_device->read_avail_mask =
test_ch->test_device->read_avail_mask &
~test_ch->channel_mask_id;
}
last_msg_index = lpm_msg.counter;
lpm_test_update_entry(test_ch,
LPM_MSG_REC,
"RECEIVE",
last_msg_index);
spin_unlock_irqrestore(&test_dev->lpm_array_lock,
test_dev->lpm_array_lock_flags);
}
}
pr_info(TEST_MODULE_NAME ":%s: Finished to recieve all (%d) "
"packets from the modem %s. Waiting for result_msg",
__func__, test_ch->config_msg.num_packets, test_ch->name);
/* Wait for the result message from the modem */
test_ch->modem_result_per_chan = wait_for_result_msg(test_ch);
/*
* the DEVICE modem result is a failure if one of the channels on
* that device, got modem_result = 0. this is why we bitwise "AND" each
* time another channel completes its task
*/
test_dev->modem_result_per_dev &= test_ch->modem_result_per_chan;
/*
* when reading from channel, we want to turn off the bit
* mask that implies that there is pending data on that channel
*/
spin_lock_irqsave(&test_dev->lpm_array_lock,
test_dev->lpm_array_lock_flags);
test_dev->open_channels_counter_to_recv--;
/* turning off the read_avail bit of the channel */
test_ch->test_device->read_avail_mask =
test_ch->test_device->read_avail_mask &
~test_ch->channel_mask_id;
spin_unlock_irqrestore(&test_dev->lpm_array_lock,
test_dev->lpm_array_lock_flags);
/* Wait for all the packets to be sent to the modem */
while (1) {
spin_lock_irqsave(&test_dev->lpm_array_lock,
test_dev->lpm_array_lock_flags);
if (test_ch->next_index_in_sent_msg_per_chan >=
test_ch->config_msg.num_packets - 1) {
spin_unlock_irqrestore(&test_dev->lpm_array_lock,
test_dev->lpm_array_lock_flags);
break;
} else {
pr_info(TEST_MODULE_NAME ":%s: Didn't finished to send "
"all packets, "
"next_index_in_sent_msg_per_chan = %d ",
__func__,
test_ch->next_index_in_sent_msg_per_chan);
}
spin_unlock_irqrestore(&test_dev->lpm_array_lock,
test_dev->lpm_array_lock_flags);
msleep(60);
}
/*
* if device has still open channels to test, then the test on the
* device is still running but the test on current channel is completed
*/
if (test_dev->open_channels_counter_to_recv != 0 ||
test_dev->open_channels_counter_to_send != 0) {
test_ch->test_completed = 1;
return 0;
} else {
test_ctx->number_of_active_devices--;
sdio_al_unregister_lpm_cb(test_ch->sdio_al_device);
if (test_ch->test_type == SDIO_TEST_LPM_RANDOM)
host_result = check_random_lpm_test_array(test_dev);
if (host_result ||
!test_dev->modem_result_per_dev ||
test_ctx->runtime_debug)
print_random_lpm_test_array(test_dev);
pr_info(TEST_MODULE_NAME ": %s - host_result=%d.(0 for "
"SUCCESS) device_modem_result=%d (1 for SUCCESS)",
__func__, host_result, test_dev->modem_result_per_dev);
test_ch->test_completed = 1;
if (test_dev->modem_result_per_dev && !host_result) {
pr_info(TEST_MODULE_NAME ": %s - Random LPM "
"TEST_PASSED for device %d of %d\n",
__func__,
(test_ctx->max_number_of_devices-
test_ctx->number_of_active_devices),
test_ctx->max_number_of_devices);
test_dev->final_result_per_dev = 1; /* PASSED */
} else {
pr_info(TEST_MODULE_NAME ": %s - Random LPM "
"TEST_FAILED for device %d of %d\n",
__func__,
(test_ctx->max_number_of_devices-
test_ctx->number_of_active_devices),
test_ctx->max_number_of_devices);
test_dev->final_result_per_dev = 0; /* FAILED */
}
check_test_completion();
kfree(test_ch->test_device->lpm_arr);
return 0;
}
exit_err:
pr_info(TEST_MODULE_NAME ": TEST FAIL for chan %s.\n",
test_ch->name);
test_ch->test_completed = 1;
test_dev->open_channels_counter_to_recv--;
test_dev->next_avail_entry_in_array = 0;
test_ch->next_index_in_sent_msg_per_chan = 0;
test_ch->test_result = TEST_FAILED;
check_test_completion();
return -ENODEV;
}
static int lpm_test_create_read_thread(struct test_channel *test_ch)
{
struct sdio_test_device *test_dev;
pr_info(TEST_MODULE_NAME ": %s - STARTED channel %s\n",
__func__, test_ch->name);
if (!test_ch) {
pr_err(TEST_MODULE_NAME ": %s - NULL test channel\n", __func__);
return -ENODEV;
}
test_dev = test_ch->test_device;
if (!test_dev) {
pr_err(TEST_MODULE_NAME ": %s - NULL test device\n", __func__);
return -ENODEV;
}
test_dev->lpm_test_task.task_name = SDIO_LPM_TEST;
test_dev->lpm_test_task.lpm_task =
kthread_create(lpm_test_main_task,
(void *)(test_ch),
test_dev->lpm_test_task.task_name);
if (IS_ERR(test_dev->lpm_test_task.lpm_task)) {
pr_err(TEST_MODULE_NAME ": %s - kthread_create() failed\n",
__func__);
return -ENOMEM;
}
wake_up_process(test_dev->lpm_test_task.lpm_task);
return 0;
}
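/*
 * Sender side of the random LPM test: spawns the reader thread
 * (lpm_test_main_task) and then sends config_msg.num_packets messages,
 * sleeping a pseudo-random number of msec between writes so that the
 * link gets a chance to enter and exit low power mode.
 */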
static void lpm_continuous_rand_test(struct test_channel *test_ch)
{
unsigned int local_ms = 0;
int ret = 0;
unsigned int write_avail = 0;
struct sdio_test_device *test_dev;
pr_info(MODULE_NAME ": %s - STARTED\n", __func__);
if (!test_ch) {
pr_err(TEST_MODULE_NAME ": %s - NULL channel\n", __func__);
return;
}
test_dev = test_ch->test_device;
if (!test_dev) {
pr_err(TEST_MODULE_NAME ": %s - NULL Test Device\n", __func__);
return;
}
ret = lpm_test_create_read_thread(test_ch);
if (ret != 0) {
pr_err(TEST_MODULE_NAME ": %s - failed to create lpm reading "
"thread", __func__);
}
while (1) {
struct lpm_msg msg;
u32 ret = 0;
/* sleeping period is dependent on number of open channels */
test_ch->config_msg.test_param =
test_ctx->lpm_pseudo_random_seed;
local_ms = test_dev->open_channels_counter_to_send *
test_ctx->lpm_pseudo_random_seed;
TEST_DBG(TEST_MODULE_NAME ":%s: SLEEPING for %d ms",
__func__, local_ms);
msleep(local_ms);
msg.counter = test_ch->next_index_in_sent_msg_per_chan;
msg.signature = LPM_TEST_CONFIG_SIGNATURE;
msg.reserve1 = 0;
msg.reserve2 = 0;
/* wait for data ready event */
write_avail = sdio_write_avail(test_ch->ch);
pr_debug(TEST_MODULE_NAME ": %s: write_avail=%d\n",
__func__, write_avail);
if (write_avail < sizeof(msg)) {
wait_event(test_ch->wait_q,
atomic_read(&test_ch->tx_notify_count));
atomic_dec(&test_ch->tx_notify_count);
}
write_avail = sdio_write_avail(test_ch->ch);
if (write_avail < sizeof(msg)) {
pr_info(TEST_MODULE_NAME ": %s: not enough write "
"avail.\n", __func__);
break;
}
ret = sdio_write(test_ch->ch, (u32 *)&msg, sizeof(msg));
if (ret)
pr_err(TEST_MODULE_NAME ":%s: sdio_write err=%d.\n",
__func__, -ret);
TEST_DBG(TEST_MODULE_NAME ": %s: for chan %s, write, "
"msg # %d\n",
__func__,
test_ch->name,
test_ch->next_index_in_sent_msg_per_chan);
if (test_ch->test_type == SDIO_TEST_LPM_RANDOM) {
spin_lock_irqsave(&test_dev->lpm_array_lock,
test_dev->lpm_array_lock_flags);
lpm_test_update_entry(test_ch, LPM_MSG_SEND,
"SEND ",
test_ch->
next_index_in_sent_msg_per_chan);
test_ch->next_index_in_sent_msg_per_chan++;
if (test_ch->next_index_in_sent_msg_per_chan ==
test_ch->config_msg.num_packets) {
spin_unlock_irqrestore(
&test_dev->lpm_array_lock,
test_dev->lpm_array_lock_flags);
break;
}
spin_unlock_irqrestore(&test_dev->lpm_array_lock,
test_dev->lpm_array_lock_flags);
}
}
spin_lock_irqsave(&test_dev->lpm_array_lock,
test_dev->lpm_array_lock_flags);
test_dev->open_channels_counter_to_send--;
spin_unlock_irqrestore(&test_dev->lpm_array_lock,
test_dev->lpm_array_lock_flags);
pr_info(TEST_MODULE_NAME ": %s: - Finished to send all (%d) "
"packets to the modem on channel %s",
__func__, test_ch->config_msg.num_packets, test_ch->name);
return;
}
static void lpm_test(struct test_channel *test_ch)
{
pr_info(TEST_MODULE_NAME ": %s - START channel %s\n", __func__,
test_ch->name);
if (!test_ch) {
pr_err(TEST_MODULE_NAME ": %s - NULL test channel\n", __func__);
return;
}
test_ch->modem_result_per_chan = wait_for_result_msg(test_ch);
pr_debug(TEST_MODULE_NAME ": %s - delete the timeout timer\n",
__func__);
del_timer_sync(&test_ch->timeout_timer);
if (test_ch->modem_result_per_chan == 0) {
pr_err(TEST_MODULE_NAME ": LPM TEST - Client didn't sleep. "
"Result Msg - is_successful=%d\n", test_ch->buf[1]);
goto exit_err;
} else {
pr_info(TEST_MODULE_NAME ": %s -"
"LPM 9K WAS SLEEPING - PASS\n", __func__);
if (test_ch->test_result == TEST_PASSED) {
pr_info(TEST_MODULE_NAME ": LPM TEST_PASSED\n");
test_ch->test_completed = 1;
check_test_completion();
} else {
pr_err(TEST_MODULE_NAME ": LPM TEST - Host didn't "
"sleep. Client slept\n");
goto exit_err;
}
}
return;
exit_err:
pr_info(TEST_MODULE_NAME ": TEST FAIL for chan %s.\n",
test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_FAILED;
check_test_completion();
return;
}
/**
* LPM Test while the host wakes up the modem
*/
static void lpm_test_host_waker(struct test_channel *test_ch)
{
pr_info(TEST_MODULE_NAME ": %s - START\n", __func__);
wait_event(test_ch->wait_q, atomic_read(&test_ch->wakeup_client));
atomic_set(&test_ch->wakeup_client, 0);
pr_info(TEST_MODULE_NAME ": %s - Sending the config_msg to wakeup "
" the client\n", __func__);
send_config_msg(test_ch);
lpm_test(test_ch);
}
/**
* Writes number of packets into test channel
* @test_ch: test channel control struct
* @burst_size: number of packets to send
*/
static int write_packet_burst(struct test_channel *test_ch,
int burst_size)
{
int ret = 0;
int packet_count = 0;
unsigned int random_num = 0;
int size = test_ch->packet_length; /* first packet size */
u32 write_avail = 0;
while (packet_count < burst_size) {
/* wait for data ready event */
write_avail = sdio_write_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":%s write_avail=%d,size=%d on chan"
" %s\n", __func__,
write_avail, size, test_ch->name);
if (write_avail < size) {
TEST_DBG(TEST_MODULE_NAME ":%s wait for event on"
" chan %s\n", __func__, test_ch->name);
wait_event(test_ch->wait_q,
atomic_read(&test_ch->tx_notify_count));
atomic_dec(&test_ch->tx_notify_count);
}
write_avail = sdio_write_avail(test_ch->ch);
if (write_avail < size) {
pr_info(TEST_MODULE_NAME ":%s not enough write"
" avail %d, need %d on chan %s\n",
__func__, write_avail, size,
test_ch->name);
continue;
}
ret = sdio_write(test_ch->ch, test_ch->buf, size);
if (ret) {
pr_err(TEST_MODULE_NAME ":%s sdio_write "
"failed (%d) on chan %s\n", __func__,
ret, test_ch->name);
break;
}
udelay(1000); /* keep bus usage low while running a number of channels */
TEST_DBG(TEST_MODULE_NAME ":%s() successfully write %d bytes"
", packet_count=%d on chan %s\n", __func__,
size, packet_count, test_ch->name);
test_ch->tx_bytes += size;
packet_count++;
/* get next packet size */
random_num = get_random_int();
size = (random_num % test_ch->packet_length) + 1;
}
return ret;
}
/**
* Reads a packet from the test channel and checks that the packet
* number encoded in the packet equals packet_counter.
* This function is applicable for packet mode channels only.
*
* @test_ch: test channel
* @size: expected packet size
* @packet_counter: number used to validate the packet read
*/
static int read_data_from_packet_ch(struct test_channel *test_ch,
unsigned int size,
int packet_counter)
{
u32 read_avail = 0;
int ret = 0;
if (!test_ch || !test_ch->ch) {
pr_err(TEST_MODULE_NAME
":%s: NULL channel\n", __func__);
return -EINVAL;
}
if (!test_ch->ch->is_packet_mode) {
pr_err(TEST_MODULE_NAME
":%s:not packet mode ch %s\n",
__func__, test_ch->name);
return -EINVAL;
}
read_avail = sdio_read_avail(test_ch->ch);
/* wait for read data ready event */
if (read_avail < size) {
TEST_DBG(TEST_MODULE_NAME ":%s() wait for rx data on "
"chan %s\n", __func__, test_ch->name);
wait_event(test_ch->wait_q,
atomic_read(&test_ch->rx_notify_count));
atomic_dec(&test_ch->rx_notify_count);
}
read_avail = sdio_read_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":%s read_avail=%d bytes on chan %s\n",
__func__, read_avail, test_ch->name);
if (read_avail != size) {
pr_err(TEST_MODULE_NAME
":read_avail size %d for chan %s not as "
"expected size %d\n",
read_avail, test_ch->name, size);
return -EINVAL;
}
ret = sdio_read(test_ch->ch, test_ch->buf, read_avail);
if (ret) {
pr_err(TEST_MODULE_NAME ":%s() sdio_read for chan %s (%d)\n",
__func__, test_ch->name, -ret);
return ret;
}
if ((test_ch->buf[0] != packet_counter) && (size != 1)) {
pr_err(TEST_MODULE_NAME ":Read WRONG DATA"
" for chan %s, size=%d\n",
test_ch->name, size);
return -EINVAL;
}
return 0;
}
/**
* Reads a packet from the test channel and checks that the packet
* number encoded in the packet equals packet_counter.
* This function is applicable for streaming mode channels only.
*
* @test_ch: test channel
* @size: expected packet size
* @packet_counter: number used to validate the packet read
*/
static int read_data_from_stream_ch(struct test_channel *test_ch,
unsigned int size,
int packet_counter)
{
u32 read_avail = 0;
int ret = 0;
if (!test_ch || !test_ch->ch) {
pr_err(TEST_MODULE_NAME
":%s: NULL channel\n", __func__);
return -EINVAL;
}
if (test_ch->ch->is_packet_mode) {
pr_err(TEST_MODULE_NAME
":%s:not streaming mode ch %s\n",
__func__, test_ch->name);
return -EINVAL;
}
read_avail = sdio_read_avail(test_ch->ch);
/* wait for read data ready event */
if (read_avail < size) {
TEST_DBG(TEST_MODULE_NAME ":%s() wait for rx data on "
"chan %s\n", __func__, test_ch->name);
wait_event(test_ch->wait_q,
atomic_read(&test_ch->rx_notify_count));
atomic_dec(&test_ch->rx_notify_count);
}
read_avail = sdio_read_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":%s read_avail=%d bytes on chan %s\n",
__func__, read_avail, test_ch->name);
if (read_avail < size) {
pr_err(TEST_MODULE_NAME
":read_avail size %d for chan %s not as "
"expected size %d\n",
read_avail, test_ch->name, size);
return -EINVAL;
}
ret = sdio_read(test_ch->ch, test_ch->buf, size + A2_HEADER_OVERHEAD);
if (ret) {
pr_err(TEST_MODULE_NAME ":%s() sdio_read for chan %s (%d)\n",
__func__, test_ch->name, -ret);
return ret;
}
if ((test_ch->buf[A2_HEADER_OVERHEAD/4] != packet_counter) &&
(size != 1)) {
pr_err(TEST_MODULE_NAME ":Read WRONG DATA"
" for chan %s, size=%d, packet_counter=%d\n",
test_ch->name, size, packet_counter);
print_hex_dump(KERN_INFO, TEST_MODULE_NAME ": rmnet:",
0, 32, 2,
(void *)test_ch->buf,
size + A2_HEADER_OVERHEAD, false);
return -EINVAL;
}
return 0;
}
/**
* Test close channel feature for SDIO_SMEM channel:
* close && re-open the SDIO_SMEM channel.
*/
#ifdef CONFIG_MSM_SDIO_SMEM
static void open_close_smem_test(struct test_channel *test_ch)
{
int i = 0;
int ret = 0;
pr_info(TEST_MODULE_NAME ":%s\n", __func__);
for (i = 0; i < 100 ; ++i) {
ret = close_sdio_ch(test_ch);
if (ret) {
pr_err(TEST_MODULE_NAME ":%s close_sdio_ch for ch %s"
" failed\n",
__func__, test_ch->name);
goto exit_err;
}
ret = open_sdio_ch(test_ch);
if (ret) {
pr_err(TEST_MODULE_NAME ":%s open_sdio_ch for ch %s "
" failed\n",
__func__, test_ch->name);
goto exit_err;
}
}
pr_info(TEST_MODULE_NAME ":%s TEST PASS for chan %s.\n", __func__,
test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_PASSED;
check_test_completion();
return;
exit_err:
pr_info(TEST_MODULE_NAME ":%s TEST FAIL for chan %s.\n", __func__,
test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_FAILED;
check_test_completion();
return;
}
#endif
/**
* Test close channel feature:
* 1. write random packet number into channel
* 2. read some data from channel (do this only for second half of
* requested packets to send).
* 3. close && re-open then repeat 1.
*
* Total packets to send: test_ch->config_msg.num_packets.
* Burst size is random in [1..test_ch->max_burst_size] range
* Packet size is random in [1..test_ch->packet_length]
*/
static void open_close_test(struct test_channel *test_ch)
{
int ret = 0;
u32 read_avail = 0;
int total_packet_count = 0;
int size = 0;
u16 *buf16 = NULL;
int i;
int max_packet_count = 0;
unsigned int random_num = 0;
int curr_burst_size = 0;
if (!test_ch || !test_ch->ch) {
pr_err(TEST_MODULE_NAME ":%s NULL channel\n",
__func__);
return;
}
curr_burst_size = test_ch->max_burst_size;
size = test_ch->packet_length;
buf16 = (u16 *) test_ch->buf;
/* the test sends configured number of packets in
2 portions: first without reading between write bursts,
second with it */
max_packet_count = test_ch->config_msg.num_packets / 2;
pr_info(TEST_MODULE_NAME ":%s channel %s, total packets:%d,"
" max packet size %d, max burst size:%d\n",
__func__, test_ch->name,
test_ch->config_msg.num_packets, test_ch->packet_length,
test_ch->max_burst_size);
for (i = 0 ; i < size / 2 ; i++)
buf16[i] = (u16) (i & 0xFFFF);
for (i = 0; i < 2 ; i++) {
total_packet_count = 0;
while (total_packet_count < max_packet_count) {
if (test_ctx->exit_flag) {
pr_info(TEST_MODULE_NAME ":%s exit test\n",
__func__);
return;
}
test_ch->buf[0] = total_packet_count;
random_num = get_random_int();
curr_burst_size = (random_num %
test_ch->max_burst_size) + 1;
/* limit burst size to send
* no more than configured packets */
if (curr_burst_size + total_packet_count >
max_packet_count) {
curr_burst_size = max_packet_count -
total_packet_count;
}
TEST_DBG(TEST_MODULE_NAME ":%s Current burst size:%d"
" on chan %s\n", __func__,
curr_burst_size, test_ch->name);
ret = write_packet_burst(test_ch, curr_burst_size);
if (ret) {
pr_err(TEST_MODULE_NAME ":%s write burst failed (%d), ch %s\n",
__func__, ret, test_ch->name);
goto exit_err;
}
if (i > 0) {
/* read from channel */
if (test_ch->ch->is_packet_mode)
ret = read_data_from_packet_ch(test_ch,
size,
total_packet_count);
else
ret = read_data_from_stream_ch(test_ch,
size,
total_packet_count);
if (ret) {
pr_err(TEST_MODULE_NAME ":%s read"
" failed:%d, chan %s\n",
__func__, ret,
test_ch->name);
goto exit_err;
}
}
TEST_DBG(TEST_MODULE_NAME ":%s before close, ch %s\n",
__func__, test_ch->name);
ret = close_sdio_ch(test_ch);
if (ret) {
pr_err(TEST_MODULE_NAME":%s close channel %s"
" failed (%d)\n",
__func__, test_ch->name, ret);
goto exit_err;
} else {
TEST_DBG(TEST_MODULE_NAME":%s close channel %s"
" success\n", __func__,
test_ch->name);
total_packet_count += curr_burst_size;
atomic_set(&test_ch->rx_notify_count, 0);
atomic_set(&test_ch->tx_notify_count, 0);
atomic_set(&test_ch->any_notify_count, 0);
}
TEST_DBG(TEST_MODULE_NAME ":%s before open, ch %s\n",
__func__, test_ch->name);
ret = open_sdio_ch(test_ch);
if (ret) {
pr_err(TEST_MODULE_NAME":%s open channel %s"
" failed (%d)\n",
__func__, test_ch->name, ret);
goto exit_err;
} else {
read_avail = sdio_read_avail(test_ch->ch);
if (read_avail > 0) {
pr_err(TEST_MODULE_NAME": after open"
" ch %s read_availis not zero"
" (%d bytes)\n",
test_ch->name, read_avail);
goto exit_err;
}
}
TEST_DBG(TEST_MODULE_NAME ":%s total tx = %d,"
" packet# = %d, size = %d for ch %s\n",
__func__, test_ch->tx_bytes,
total_packet_count, size,
test_ch->name);
} /* end of while */
}
pr_info(TEST_MODULE_NAME ":%s Test end: total rx bytes = 0x%x,"
" total tx bytes = 0x%x for chan %s\n", __func__,
test_ch->rx_bytes, test_ch->tx_bytes, test_ch->name);
pr_info(TEST_MODULE_NAME ":%s TEST PASS for chan %s.\n", __func__,
test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_PASSED;
check_test_completion();
return;
exit_err:
pr_info(TEST_MODULE_NAME ":%s TEST FAIL for chan %s.\n", __func__,
test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_FAILED;
check_test_completion();
return;
}
/**
* sender Test
*/
static void sender_test(struct test_channel *test_ch)
{
int ret = 0 ;
u32 read_avail = 0;
u32 write_avail = 0;
int packet_count = 0;
int size = 512;
u16 *buf16 = (u16 *) test_ch->buf;
int i;
int max_packet_count = 10000;
int random_num = 0;
max_packet_count = test_ch->config_msg.num_packets;
for (i = 0 ; i < size / 2 ; i++)
buf16[i] = (u16) (i & 0xFFFF);
pr_info(TEST_MODULE_NAME
":SENDER TEST START for chan %s\n", test_ch->name);
while (packet_count < max_packet_count) {
if (test_ctx->exit_flag) {
pr_info(TEST_MODULE_NAME ":Exit Test.\n");
return;
}
random_num = get_random_int();
size = (random_num % test_ch->packet_length) + 1;
TEST_DBG(TEST_MODULE_NAME "SENDER WAIT FOR EVENT for chan %s\n",
test_ch->name);
/* wait for data ready event */
write_avail = sdio_write_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":write_avail=%d\n", write_avail);
if (write_avail < size) {
wait_event(test_ch->wait_q,
atomic_read(&test_ch->tx_notify_count));
atomic_dec(&test_ch->tx_notify_count);
}
write_avail = sdio_write_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":write_avail=%d\n", write_avail);
if (write_avail < size) {
pr_info(TEST_MODULE_NAME ":not enough write avail.\n");
continue;
}
test_ch->buf[0] = packet_count;
ret = sdio_write(test_ch->ch, test_ch->buf, size);
if (ret) {
pr_info(TEST_MODULE_NAME ":sender sdio_write err=%d.\n",
-ret);
goto exit_err;
}
/* wait for read data ready event */
TEST_DBG(TEST_MODULE_NAME ":sender wait for rx data for "
"chan %s\n",
test_ch->name);
read_avail = sdio_read_avail(test_ch->ch);
wait_event(test_ch->wait_q,
atomic_read(&test_ch->rx_notify_count));
atomic_dec(&test_ch->rx_notify_count);
read_avail = sdio_read_avail(test_ch->ch);
if (read_avail != size) {
pr_info(TEST_MODULE_NAME
":read_avail size %d for chan %s not as "
"expected size %d.\n",
read_avail, test_ch->name, size);
goto exit_err;
}
memset(test_ch->buf, 0x00, size);
ret = sdio_read(test_ch->ch, test_ch->buf, size);
if (ret) {
pr_info(TEST_MODULE_NAME ":sender sdio_read for chan %s"
" err=%d.\n",
test_ch->name, -ret);
goto exit_err;
}
if ((test_ch->buf[0] != packet_count) && (size != 1)) {
pr_info(TEST_MODULE_NAME ":sender sdio_read WRONG DATA"
" for chan %s, size=%d\n",
test_ch->name, size);
goto exit_err;
}
test_ch->tx_bytes += size;
test_ch->rx_bytes += size;
packet_count++;
TEST_DBG(TEST_MODULE_NAME
":sender total rx bytes = 0x%x , packet#=%d, size=%d"
" for chan %s\n",
test_ch->rx_bytes, packet_count, size, test_ch->name);
TEST_DBG(TEST_MODULE_NAME
":sender total tx bytes = 0x%x , packet#=%d, size=%d"
" for chan %s\n",
test_ch->tx_bytes, packet_count, size, test_ch->name);
} /* end of while */
pr_info(TEST_MODULE_NAME
":SENDER TEST END: total rx bytes = 0x%x, "
" total tx bytes = 0x%x for chan %s\n",
test_ch->rx_bytes, test_ch->tx_bytes, test_ch->name);
pr_info(TEST_MODULE_NAME ": TEST PASS for chan %s.\n",
test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_PASSED;
check_test_completion();
return;
exit_err:
pr_info(TEST_MODULE_NAME ": TEST FAIL for chan %s.\n",
test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_FAILED;
check_test_completion();
return;
}
/**
* A2 Perf Test
*/
static void a2_performance_test(struct test_channel *test_ch)
{
int ret = 0 ;
u32 read_avail = 0;
u32 write_avail = 0;
int tx_packet_count = 0;
int rx_packet_count = 0;
int size = 0;
u16 *buf16 = (u16 *) test_ch->buf;
int i;
int total_bytes = 0;
int max_packets = 10000;
u32 packet_size = test_ch->buf_size;
int rand_size = 0;
u64 start_jiffy, end_jiffy, delta_jiffies;
unsigned int time_msec = 0;
u32 throughput = 0;
max_packets = test_ch->config_msg.num_packets;
packet_size = test_ch->packet_length;
for (i = 0; i < packet_size / 2; i++)
buf16[i] = (u16) (i & 0xFFFF);
pr_info(TEST_MODULE_NAME ": A2 PERFORMANCE TEST START for chan %s\n",
test_ch->name);
start_jiffy = get_jiffies_64(); /* read the current time */
while (tx_packet_count < max_packets) {
if (test_ctx->exit_flag) {
pr_info(TEST_MODULE_NAME ":Exit Test.\n");
return;
}
if (test_ch->random_packet_size) {
rand_size = get_random_int();
packet_size = (rand_size % test_ch->packet_length) + 1;
if (packet_size < A2_MIN_PACKET_SIZE)
packet_size = A2_MIN_PACKET_SIZE;
}
/* wait for data ready event */
/* use a func to avoid compiler optimizations */
write_avail = sdio_write_avail(test_ch->ch);
read_avail = sdio_read_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":channel %s, write_avail=%d, "
"read_avail=%d for chan %s\n",
test_ch->name, write_avail, read_avail,
test_ch->name);
if ((write_avail == 0) && (read_avail == 0)) {
wait_event(test_ch->wait_q,
atomic_read(&test_ch->any_notify_count));
atomic_set(&test_ch->any_notify_count, 0);
}
write_avail = sdio_write_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":channel %s, write_avail=%d\n",
test_ch->name, write_avail);
if (write_avail > 0) {
size = min(packet_size, write_avail) ;
TEST_DBG(TEST_MODULE_NAME ":tx size = %d for chan %s\n",
size, test_ch->name);
test_ch->buf[0] = tx_packet_count;
test_ch->buf[(size/4)-1] = tx_packet_count;
ret = sdio_write(test_ch->ch, test_ch->buf, size);
if (ret) {
pr_info(TEST_MODULE_NAME ":sdio_write err=%d"
" for chan %s\n",
-ret, test_ch->name);
goto exit_err;
}
tx_packet_count++;
test_ch->tx_bytes += size;
}
read_avail = sdio_read_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":channel %s, read_avail=%d\n",
test_ch->name, read_avail);
if (read_avail > 0) {
size = min(packet_size, read_avail);
pr_debug(TEST_MODULE_NAME ":rx size = %d.\n", size);
ret = sdio_read(test_ch->ch, test_ch->buf, size);
if (ret) {
pr_info(TEST_MODULE_NAME ": sdio_read size %d "
" err=%d"
" for chan %s\n",
size, -ret, test_ch->name);
goto exit_err;
}
rx_packet_count++;
test_ch->rx_bytes += size;
}
TEST_DBG(TEST_MODULE_NAME
":total rx bytes = %d , rx_packet#=%d"
" for chan %s\n",
test_ch->rx_bytes, rx_packet_count, test_ch->name);
TEST_DBG(TEST_MODULE_NAME
":total tx bytes = %d , tx_packet#=%d"
" for chan %s\n",
test_ch->tx_bytes, tx_packet_count, test_ch->name);
} /* while (tx_packet_count < max_packets ) */
end_jiffy = get_jiffies_64(); /* read the current time */
delta_jiffies = end_jiffy - start_jiffy;
time_msec = jiffies_to_msecs(delta_jiffies);
pr_info(TEST_MODULE_NAME ":total rx bytes = 0x%x , rx_packet#=%d for"
" chan %s.\n",
test_ch->rx_bytes, rx_packet_count, test_ch->name);
pr_info(TEST_MODULE_NAME ":total tx bytes = 0x%x , tx_packet#=%d"
" for chan %s.\n",
test_ch->tx_bytes, tx_packet_count, test_ch->name);
total_bytes = (test_ch->tx_bytes + test_ch->rx_bytes);
pr_err(TEST_MODULE_NAME ":total bytes = %d, time msec = %d"
" for chan %s\n",
total_bytes , (int) time_msec, test_ch->name);
if (!test_ch->random_packet_size) {
if (time_msec) {
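/* bytes per msec * 8 / 1000 == Mbit/sec */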
throughput = (total_bytes / time_msec) * 8 / 1000;
pr_err(TEST_MODULE_NAME ": %s - Performance = "
"%d Mbit/sec for chan %s\n",
__func__, throughput, test_ch->name);
} else {
pr_err(TEST_MODULE_NAME ": %s - time_msec = 0 Couldn't "
"calculate performence for chan %s\n",
__func__, test_ch->name);
}
}
#ifdef CONFIG_DEBUG_FS
switch (test_ch->ch_id) {
case SDIO_DUN:
test_ctx->debug.dun_throughput = throughput;
break;
case SDIO_RMNT:
test_ctx->debug.rmnt_throughput = throughput;
break;
default:
pr_err(TEST_MODULE_NAME "No debugfs for this channel "
"throughput");
}
#endif
pr_err(TEST_MODULE_NAME ": A2 PERFORMANCE TEST END for chan %s.\n",
test_ch->name);
pr_err(TEST_MODULE_NAME ": TEST PASS for chan %s\n", test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_PASSED;
check_test_completion();
return;
exit_err:
pr_err(TEST_MODULE_NAME ": TEST FAIL for chan %s\n", test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_FAILED;
check_test_completion();
return;
}
/**
* rx_cleanup
* This function reads all the messages sent by the modem until
* the read_avail is 0 after 1 second of sleep.
* The number of packets received is returned through rx_packet_count.
*/
static void rx_cleanup(struct test_channel *test_ch, int *rx_packet_count)
{
int read_avail = 0;
int ret = 0;
int counter = 0;
if (!test_ch || !test_ch->ch) {
pr_err(TEST_MODULE_NAME ":%s NULL channel\n",
__func__);
return;
}
read_avail = sdio_read_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":channel %s, read_avail=%d\n",
test_ch->name, read_avail);
/* If no pending messages, wait to see if the modem sends data */
if (read_avail == 0) {
msleep(1000);
read_avail = sdio_read_avail(test_ch->ch);
}
while ((read_avail > 0) && (counter < 10)) {
TEST_DBG(TEST_MODULE_NAME ": read_avail=%d for ch %s\n",
read_avail, test_ch->name);
ret = sdio_read(test_ch->ch, test_ch->buf, read_avail);
if (ret) {
pr_info(TEST_MODULE_NAME ": sdio_read size %d "
" err=%d for chan %s\n",
read_avail, -ret, test_ch->name);
break;
}
(*rx_packet_count)++;
test_ch->rx_bytes += read_avail;
read_avail = sdio_read_avail(test_ch->ch);
if (read_avail == 0) {
msleep(1000);
counter++;
read_avail = sdio_read_avail(test_ch->ch);
}
}
pr_info(TEST_MODULE_NAME ": finished cleanup for ch %s, "
"rx_packet_count=%d, total rx bytes=%d\n",
test_ch->name, *rx_packet_count, test_ch->rx_bytes);
}
/**
* A2 RTT Test
* This function sends a packet and calculates the RTT time of
* this packet.
* The test also calculates the Min, Max and Average RTT.
*/
static void a2_rtt_test(struct test_channel *test_ch)
{
int ret = 0 ;
u32 read_avail = 0;
u32 write_avail = 0;
int tx_packet_count = 0;
int rx_packet_count = 0;
u16 *buf16 = NULL;
int i;
int max_packets = 0;
u32 packet_size = 0;
s64 start_time, end_time;
int delta_usec = 0;
int time_average = 0;
int min_delta_usec = 0xFFFF;
int max_delta_usec = 0;
int total_time = 0;
int expected_read_size = 0;
int delay_ms = 0;
int slow_rtt_counter = 0;
int read_avail_so_far = 0;
if (test_ch) {
/*
* Cleanup the pending RX data (such as loopback of the
* config msg)
*/
rx_cleanup(test_ch, &rx_packet_count);
rx_packet_count = 0;
} else {
return;
}
max_packets = test_ch->config_msg.num_packets;
packet_size = test_ch->packet_length;
buf16 = (u16 *) test_ch->buf;
for (i = 0; i < packet_size / 2; i++)
buf16[i] = (u16) (i & 0xFFFF);
pr_info(TEST_MODULE_NAME ": A2 RTT TEST START for chan %s\n",
test_ch->name);
switch (test_ch->ch_id) {
case SDIO_RMNT:
delay_ms = 100;
break;
case SDIO_CSVT:
delay_ms = 0;
break;
default:
pr_err(TEST_MODULE_NAME ": %s - ch_id invalid.\n",
__func__);
return;
}
while (tx_packet_count < max_packets) {
if (test_ctx->exit_flag) {
pr_info(TEST_MODULE_NAME ":Exit Test.\n");
return;
}
start_time = 0;
end_time = 0;
read_avail_so_far = 0;
if (delay_ms)
msleep(delay_ms);
/* wait for data ready event */
write_avail = sdio_write_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":ch %s: write_avail=%d\n",
test_ch->name, write_avail);
if (write_avail == 0) {
wait_event(test_ch->wait_q,
atomic_read(&test_ch->tx_notify_count));
atomic_dec(&test_ch->tx_notify_count);
}
write_avail = sdio_write_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":channel %s, write_avail=%d\n",
test_ch->name, write_avail);
if (write_avail > 0) {
TEST_DBG(TEST_MODULE_NAME ":tx size = %d for chan %s\n",
packet_size, test_ch->name);
test_ch->buf[0] = tx_packet_count;
start_time = ktime_to_us(ktime_get());
ret = sdio_write(test_ch->ch, test_ch->buf,
packet_size);
if (ret) {
pr_err(TEST_MODULE_NAME ":sdio_write err=%d"
" for chan %s\n",
-ret, test_ch->name);
goto exit_err;
}
tx_packet_count++;
test_ch->tx_bytes += packet_size;
} else {
pr_err(TEST_MODULE_NAME ": Invalid write_avail"
" %d for chan %s\n",
write_avail, test_ch->name);
goto exit_err;
}
expected_read_size = packet_size + A2_HEADER_OVERHEAD;
while (read_avail_so_far < expected_read_size) {
read_avail = sdio_read_avail(test_ch->ch);
if (!read_avail) {
wait_event(test_ch->wait_q,
atomic_read(&test_ch->
rx_notify_count));
atomic_dec(&test_ch->rx_notify_count);
continue;
}
read_avail_so_far += read_avail;
if (read_avail_so_far > expected_read_size) {
pr_err(TEST_MODULE_NAME ": %s - Invalid "
"read_avail(%d) read_avail_so_far(%d) "
"can't be larger than "
"expected_read_size(%d).",
__func__,
read_avail,
read_avail_so_far,
expected_read_size);
goto exit_err;
}
/*
* must read entire pending bytes, so later, we will
* get a notification when more data arrives
*/
ret = sdio_read(test_ch->ch, test_ch->buf,
read_avail);
if (ret) {
pr_info(TEST_MODULE_NAME ": sdio_read size %d "
" err=%d for chan %s\n",
read_avail, -ret,
test_ch->name);
goto exit_err;
}
}
end_time = ktime_to_us(ktime_get());
rx_packet_count++;
test_ch->rx_bytes += expected_read_size;
delta_usec = (int)(end_time - start_time);
total_time += delta_usec;
if (delta_usec < min_delta_usec)
min_delta_usec = delta_usec;
if (delta_usec > max_delta_usec)
max_delta_usec = delta_usec;
/* checking the RTT per channel criteria */
if (delta_usec > MAX_AVG_RTT_TIME_USEC) {
pr_err(TEST_MODULE_NAME ": %s - "
"msg # %d - rtt time (%d usec) is "
"longer than %d usec\n",
__func__,
tx_packet_count,
delta_usec,
MAX_AVG_RTT_TIME_USEC);
slow_rtt_counter++;
}
TEST_DBG(TEST_MODULE_NAME
":RTT time=%d for packet #%d for chan %s\n",
delta_usec, tx_packet_count, test_ch->name);
} /* while (tx_packet_count < max_packets ) */
pr_info(TEST_MODULE_NAME ": %s - tx_packet_count = %d\n",
__func__, tx_packet_count);
pr_info(TEST_MODULE_NAME ": %s - total rx bytes = 0x%x, "
"rx_packet# = %d for chan %s.\n",
__func__, test_ch->rx_bytes, rx_packet_count, test_ch->name);
pr_info(TEST_MODULE_NAME ": %s - total tx bytes = 0x%x, "
"tx_packet# = %d for chan %s.\n",
__func__, test_ch->tx_bytes, tx_packet_count, test_ch->name);
pr_info(TEST_MODULE_NAME ": %s - slow_rtt_counter = %d for "
"chan %s.\n",
__func__, slow_rtt_counter, test_ch->name);
if (tx_packet_count) {
time_average = total_time / tx_packet_count;
pr_info(TEST_MODULE_NAME ":Average RTT time = %d for chan %s\n",
time_average, test_ch->name);
} else {
pr_err(TEST_MODULE_NAME ": %s - tx_packet_count=0. couldn't "
"calculate average rtt time", __func__);
}
pr_info(TEST_MODULE_NAME ":MIN RTT time = %d for chan %s\n",
min_delta_usec, test_ch->name);
pr_info(TEST_MODULE_NAME ":MAX RTT time = %d for chan %s\n",
max_delta_usec, test_ch->name);
pr_info(TEST_MODULE_NAME ": A2 RTT TEST END for chan %s.\n",
test_ch->name);
if (ret)
goto exit_err;
if (time_average == 0 || time_average > MAX_AVG_RTT_TIME_USEC) {
pr_err(TEST_MODULE_NAME ": %s - average_time = %d. Invalid "
"value",
__func__, time_average);
goto exit_err;
}
pr_info(TEST_MODULE_NAME ": TEST PASS for chan %s\n", test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_PASSED;
check_test_completion();
return;
exit_err:
pr_err(TEST_MODULE_NAME ": TEST FAIL for chan %s\n", test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_FAILED;
check_test_completion();
return;
}
/**
* Process Rx Data - Helper for A2 Validation Test
* @test_ch(in/out) : Test channel that contains Rx data buffer to process.
*
* @rx_unprocessed_bytes(in) : Number of bytes to process in the buffer.
*
* @rx_process_packet_state(in/out) :
* Current processing state (used to identify what to process
* next in a partial packet)
*
* @rx_packet_size(in/out) :
* Number of bytes remaining in the packet to be processed.
*
* @rx_packet_count(in/out) :
* Number of packets processed.
*/
static int process_rx_data(struct test_channel *test_ch,
u32 rx_unprocessed_bytes,
int *rx_process_packet_state,
u16 *rx_packet_size,
int *rx_packet_count)
{
u8 *buf = (u8 *)test_ch->buf;
int eop = 0;
int i = 0;
int ret = 0;
u32 *ptr = 0;
u16 size = 0;
/* process rx data */
while (rx_unprocessed_bytes) {
TEST_DBG(TEST_MODULE_NAME ": unprocessed bytes : %u\n",
rx_unprocessed_bytes);
switch (*rx_process_packet_state) {
case RX_PROCESS_PACKET_INIT:
/* process the A2 header */
TEST_DBG(TEST_MODULE_NAME ": "
"RX_PROCESS_PACKET_INIT\n");
*rx_process_packet_state = RX_PROCESS_PACKET_INIT;
if (rx_unprocessed_bytes < 4)
break;
i += 4;
rx_unprocessed_bytes -= 4;
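/* fall through - the remaining A2 header bytes are parsed next */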
case RX_PROCESS_A2_HEADER:
/* process the rest of A2 header */
TEST_DBG(TEST_MODULE_NAME ": RX_PROCESS_A2_HEADER\n");
*rx_process_packet_state = RX_PROCESS_A2_HEADER;
if (rx_unprocessed_bytes < 4)
break;
ptr = (u32 *)&buf[i];
/*
* upper 2 bytes of the last 4 bytes of A2 header
* contains the size of the packet
*/
*rx_packet_size = *ptr >> 0x10;
i += 4;
rx_unprocessed_bytes -= 4;
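/* fall through - start consuming the packet payload */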
case RX_PROCESS_PACKET_DATA:
/* process the packet data */
TEST_DBG(TEST_MODULE_NAME ": RX_PROCESS_PACKET_DATA "
"- packet size - %u\n", *rx_packet_size);
*rx_process_packet_state = RX_PROCESS_PACKET_DATA;
size = *rx_packet_size;
if (*rx_packet_size <= rx_unprocessed_bytes) {
eop = *rx_packet_size;
*rx_packet_size = 0;
} else {
eop = rx_unprocessed_bytes;
*rx_packet_size = *rx_packet_size -
rx_unprocessed_bytes;
}
/* no more bytes available to process */
if (!eop)
break;
/*
* end of packet is starting from
* the current position
*/
eop = eop + i;
TEST_DBG(TEST_MODULE_NAME ": size - %u, "
"packet size - %u eop - %d\n",
size, *rx_packet_size, eop);
/* validate the data */
for (; i < eop; i++) {
if (buf[i] != (test_ch->rx_bytes % 256)) {
pr_err(TEST_MODULE_NAME ": "
"Corrupt data. buf:%u, "
"data:%u\n", buf[i],
test_ch->rx_bytes % 256);
ret = -EINVAL;
goto err;
}
rx_unprocessed_bytes--;
test_ch->rx_bytes++;
}
/* have more data to be processed */
if (*rx_packet_size)
break;
/*
* A2 sends data in 4 byte alignment,
* skip the padding
*/
if (size % 4) {
i += 4 - (size % 4);
rx_unprocessed_bytes -= 4 - (size % 4);
}
*rx_packet_count = *rx_packet_count + 1;
/* re init the state to process new packet */
*rx_process_packet_state = RX_PROCESS_PACKET_INIT;
break;
default:
pr_err(TEST_MODULE_NAME ": Invalid case: %d\n",
*rx_process_packet_state);
ret = -EINVAL;
goto err;
}
TEST_DBG(TEST_MODULE_NAME ": Continue processing "
"if more data is available\n");
}
err:
return ret;
}
/**
* A2 Validation Test
* Send packets and validate the returned packets.
* Transmit one packet at a time, while process multiple rx
* packets in a single transaction.
* A transaction is of size min(random number, write_avail).
* A packet consists of a min of 1 byte to channel supported max.
*/
static void a2_validation_test(struct test_channel *test_ch)
{
int ret = 0 ;
u32 read_avail = 0;
u32 write_avail = 0;
int tx_packet_count = 0;
int rx_packet_count = 0;
int initial_rx_packet_count = 0;
u32 size = 0;
u8 *buf8 = (u8 *)test_ch->buf;
int i = 0;
int max_packets = test_ch->config_msg.num_packets;
u16 tx_packet_size = 0;
u16 rx_packet_size = 0;
u32 random_num = 0;
int rx_process_packet_state = RX_PROCESS_PACKET_INIT;
pr_info(TEST_MODULE_NAME ": A2 VALIDATION TEST START for chan %s\n",
test_ch->name);
/* Wait for the initial rx messages before starting the test. */
rx_cleanup(test_ch, &initial_rx_packet_count);
test_ch->tx_bytes = 0;
test_ch->rx_bytes = 0;
/* Continue till we have transmitted and received all packets */
while ((tx_packet_count < max_packets) ||
(rx_packet_count < max_packets)) {
if (test_ctx->exit_flag) {
pr_info(TEST_MODULE_NAME ":Exit Test.\n");
return;
}
random_num = get_random_int();
size = (random_num % test_ch->packet_length) + 1;
TEST_DBG(TEST_MODULE_NAME ": Random tx packet size =%u", size);
/*
* wait for data ready event
* use a func to avoid compiler optimizations
*/
write_avail = sdio_write_avail(test_ch->ch);
read_avail = sdio_read_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ": write_avail=%d, "
"read_avail=%d for chan %s\n",
write_avail, read_avail, test_ch->name);
if ((write_avail == 0) && (read_avail == 0)) {
wait_event(test_ch->wait_q,
atomic_read(&test_ch->any_notify_count));
atomic_set(&test_ch->any_notify_count, 0);
}
/* Transmit data */
write_avail = sdio_write_avail(test_ch->ch);
if ((tx_packet_count < max_packets) && (write_avail > 0)) {
tx_packet_size = min(size, write_avail) ;
TEST_DBG(TEST_MODULE_NAME ": tx size = %u, "
"write_avail = %u tx_packet# = %d\n",
tx_packet_size, write_avail,
tx_packet_count);
memset(test_ch->buf, 0, test_ch->buf_size);
/* populate the buffer */
for (i = 0; i < tx_packet_size; i++) {
buf8[i] = test_ch->tx_bytes % 256;
test_ch->tx_bytes++;
}
ret = sdio_write(test_ch->ch, test_ch->buf,
tx_packet_size);
if (ret) {
pr_err(TEST_MODULE_NAME ":sdio_write err=%d"
" for chan %s\n",
-ret, test_ch->name);
goto exit_err;
}
tx_packet_count++;
}
/* Receive data */
read_avail = sdio_read_avail(test_ch->ch);
if (read_avail > 0) {
TEST_DBG(TEST_MODULE_NAME ": rx size = %u, "
"rx_packet#=%d.\n",
read_avail, rx_packet_count);
memset(test_ch->buf, 0, test_ch->buf_size);
ret = sdio_read(test_ch->ch, test_ch->buf,
read_avail);
if (ret) {
pr_err(TEST_MODULE_NAME ": sdio_read "
"size %d err=%d for chan %s\n",
size, -ret, test_ch->name);
goto exit_err;
}
/* Process data */
ret = process_rx_data(test_ch, read_avail,
&rx_process_packet_state,
&rx_packet_size,
&rx_packet_count);
if (ret != 0)
goto exit_err;
}
TEST_DBG(TEST_MODULE_NAME ": Continue loop ...\n");
}
if (test_ch->tx_bytes != test_ch->rx_bytes) {
pr_err(TEST_MODULE_NAME ": Total number of bytes "
"transmitted (%u) does not match the total "
"number of bytes received (%u).", test_ch->tx_bytes,
test_ch->rx_bytes);
goto exit_err;
}
pr_info(TEST_MODULE_NAME ": A2 VALIDATION TEST END for chan %s.\n",
test_ch->name);
pr_info(TEST_MODULE_NAME ": TEST PASS for chan %s\n", test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_PASSED;
check_test_completion();
return;
exit_err:
pr_info(TEST_MODULE_NAME ": TEST FAIL for chan %s\n", test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_FAILED;
check_test_completion();
return;
}
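/*
 * Worked example (illustrative numbers only): with packet_length == 4096
 * and a random value of 10000, size = (10000 % 4096) + 1 = 1809; if
 * write_avail happens to be 1500, tx_packet_size = min(1809, 1500) = 1500
 * bytes for that transaction.
 */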
/**
* sender No loopback Test
*/
static void sender_no_loopback_test(struct test_channel *test_ch)
{
int ret = 0 ;
u32 write_avail = 0;
int packet_count = 0;
int size = 512;
u16 *buf16 = (u16 *) test_ch->buf;
int i;
int max_packet_count = 10000;
unsigned int random_num = 0;
max_packet_count = test_ch->config_msg.num_packets;
for (i = 0 ; i < size / 2 ; i++)
buf16[i] = (u16) (i & 0xFFFF);
pr_info(TEST_MODULE_NAME
":SENDER NO LP TEST START for chan %s\n", test_ch->name);
while (packet_count < max_packet_count) {
if (test_ctx->exit_flag) {
pr_info(TEST_MODULE_NAME ":Exit Test.\n");
return;
}
random_num = get_random_int();
size = (random_num % test_ch->packet_length) + 1;
TEST_DBG(TEST_MODULE_NAME ":SENDER WAIT FOR EVENT "
"for chan %s\n",
test_ch->name);
/* wait for data ready event */
write_avail = sdio_write_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":write_avail=%d\n", write_avail);
if (write_avail < size) {
wait_event(test_ch->wait_q,
atomic_read(&test_ch->tx_notify_count));
atomic_dec(&test_ch->tx_notify_count);
}
write_avail = sdio_write_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":write_avail=%d\n", write_avail);
if (write_avail < size) {
pr_info(TEST_MODULE_NAME ":not enough write avail.\n");
continue;
}
test_ch->buf[0] = packet_count;
ret = sdio_write(test_ch->ch, test_ch->buf, size);
if (ret) {
pr_info(TEST_MODULE_NAME ":sender sdio_write err=%d.\n",
-ret);
goto exit_err;
}
test_ch->tx_bytes += size;
packet_count++;
TEST_DBG(TEST_MODULE_NAME
":sender total tx bytes = 0x%x , packet#=%d, size=%d"
" for chan %s\n",
test_ch->tx_bytes, packet_count, size, test_ch->name);
} /* end of while */
pr_info(TEST_MODULE_NAME
":SENDER TEST END: total tx bytes = 0x%x, "
" for chan %s\n",
test_ch->tx_bytes, test_ch->name);
test_ch->modem_result_per_chan = wait_for_result_msg(test_ch);
if (test_ch->modem_result_per_chan) {
pr_info(TEST_MODULE_NAME ": TEST PASS for chan %s.\n",
test_ch->name);
test_ch->test_result = TEST_PASSED;
} else {
pr_info(TEST_MODULE_NAME ": TEST FAILURE for chan %s.\n",
test_ch->name);
test_ch->test_result = TEST_FAILED;
}
test_ch->test_completed = 1;
check_test_completion();
return;
exit_err:
pr_info(TEST_MODULE_NAME ": TEST FAIL for chan %s.\n",
test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_FAILED;
check_test_completion();
return;
}
/**
* Modem reset Test
* The test verifies that all packets are eventually sent, even if a
* modem reset happens in the middle
*/
static void modem_reset_test(struct test_channel *test_ch)
{
int ret = 0 ;
u32 read_avail = 0;
u32 write_avail = 0;
int tx_packet_count = 0;
int rx_packet_count = 0;
int size = 0;
u16 *buf16 = (u16 *) test_ch->buf;
int i;
int max_packets = 10000;
u32 packet_size = test_ch->buf_size;
int is_err = 0;
max_packets = test_ch->config_msg.num_packets;
packet_size = test_ch->packet_length;
for (i = 0; i < packet_size / 2; i++)
buf16[i] = (u16) (i & 0xFFFF);
pr_info(TEST_MODULE_NAME ": Modem Reset TEST START for chan %s\n",
test_ch->name);
while (tx_packet_count < max_packets) {
if (test_ctx->exit_flag) {
pr_info(TEST_MODULE_NAME ":Exit Test.\n");
return;
}
if (test_ch->card_removed) {
pr_info(TEST_MODULE_NAME ": card removal was detected "
"for chan %s, tx_total=0x%x\n",
test_ch->name, test_ch->tx_bytes);
wait_event(test_ch->wait_q,
atomic_read(&test_ch->card_detected_event));
atomic_set(&test_ch->card_detected_event, 0);
pr_info(TEST_MODULE_NAME ": card_detected_event "
"for chan %s\n", test_ch->name);
if (test_ch->card_removed)
continue;
is_err = 0;
/* Need to wait for the modem to be ready */
msleep(5000);
pr_info(TEST_MODULE_NAME ": sending the config message "
"for chan %s\n", test_ch->name);
send_config_msg(test_ch);
}
/* wait for data ready event */
/* use a func to avoid compiler optimizations */
write_avail = sdio_write_avail(test_ch->ch);
read_avail = sdio_read_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":channel %s, write_avail=%d, "
"read_avail=%d for chan %s\n",
test_ch->name, write_avail, read_avail,
test_ch->name);
if ((write_avail == 0) && (read_avail == 0)) {
wait_event(test_ch->wait_q,
atomic_read(&test_ch->any_notify_count));
atomic_set(&test_ch->any_notify_count, 0);
}
if (atomic_read(&test_ch->card_detected_event)) {
atomic_set(&test_ch->card_detected_event, 0);
pr_info(TEST_MODULE_NAME ": card_detected_event "
"for chan %s, tx_total=0x%x\n",
test_ch->name, test_ch->tx_bytes);
if (test_ch->card_removed)
continue;
/* Need to wait for the modem to be ready */
msleep(5000);
is_err = 0;
pr_info(TEST_MODULE_NAME ": sending the config message "
"for chan %s\n", test_ch->name);
send_config_msg(test_ch);
}
write_avail = sdio_write_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":channel %s, write_avail=%d\n",
test_ch->name, write_avail);
if (write_avail > 0) {
size = min(packet_size, write_avail) ;
pr_debug(TEST_MODULE_NAME ":tx size = %d for chan %s\n",
size, test_ch->name);
test_ch->buf[0] = tx_packet_count;
test_ch->buf[(size/4)-1] = tx_packet_count;
TEST_DBG(TEST_MODULE_NAME ":channel %s, sdio_write, "
"size=%d\n", test_ch->name, size);
if (is_err) {
msleep(100);
continue;
}
ret = sdio_write(test_ch->ch, test_ch->buf, size);
if (ret) {
pr_info(TEST_MODULE_NAME ":sdio_write err=%d"
" for chan %s\n",
-ret, test_ch->name);
is_err = 1;
msleep(20);
continue;
}
tx_packet_count++;
test_ch->tx_bytes += size;
test_ch->config_msg.num_packets--;
}
read_avail = sdio_read_avail(test_ch->ch);
TEST_DBG(TEST_MODULE_NAME ":channel %s, read_avail=%d\n",
test_ch->name, read_avail);
if (read_avail > 0) {
size = min(packet_size, read_avail);
pr_debug(TEST_MODULE_NAME ":rx size = %d.\n", size);
TEST_DBG(TEST_MODULE_NAME ":channel %s, sdio_read, "
"size=%d\n", test_ch->name, size);
if (is_err) {
msleep(100);
continue;
}
ret = sdio_read(test_ch->ch, test_ch->buf, size);
if (ret) {
pr_info(TEST_MODULE_NAME ": sdio_read size %d "
" err=%d"
" for chan %s\n",
size, -ret, test_ch->name);
is_err = 1;
msleep(20);
continue;
}
rx_packet_count++;
test_ch->rx_bytes += size;
}
TEST_DBG(TEST_MODULE_NAME
":total rx bytes = %d , rx_packet#=%d"
" for chan %s\n",
test_ch->rx_bytes, rx_packet_count, test_ch->name);
TEST_DBG(TEST_MODULE_NAME
":total tx bytes = %d , tx_packet#=%d"
" for chan %s\n",
test_ch->tx_bytes, tx_packet_count, test_ch->name);
udelay(500);
} /* while (tx_packet_count < max_packets ) */
pr_info(TEST_MODULE_NAME ":total rx bytes = 0x%x , rx_packet#=%d for"
" chan %s.\n",
test_ch->rx_bytes, rx_packet_count, test_ch->name);
pr_info(TEST_MODULE_NAME ":total tx bytes = 0x%x , tx_packet#=%d"
" for chan %s.\n",
test_ch->tx_bytes, tx_packet_count, test_ch->name);
pr_err(TEST_MODULE_NAME ": Modem Reset TEST END for chan %s.\n",
test_ch->name);
pr_err(TEST_MODULE_NAME ": TEST PASS for chan %s\n", test_ch->name);
test_ch->test_completed = 1;
test_ch->test_result = TEST_PASSED;
check_test_completion();
return;
}
/**
* Worker thread to handle the tests types
*/
static void worker(struct work_struct *work)
{
struct test_channel *test_ch = NULL;
struct test_work *test_work = container_of(work,
struct test_work,
work);
int test_type = 0;
test_ch = test_work->test_ch;
if (test_ch == NULL) {
pr_err(TEST_MODULE_NAME ":NULL test_ch\n");
return;
}
test_type = test_ch->test_type;
switch (test_type) {
case SDIO_TEST_LOOPBACK_HOST:
loopback_test(test_ch);
break;
case SDIO_TEST_LOOPBACK_CLIENT:
sender_test(test_ch);
break;
case SDIO_TEST_PERF:
a2_performance_test(test_ch);
break;
case SDIO_TEST_LPM_CLIENT_WAKER:
lpm_test(test_ch);
break;
case SDIO_TEST_LPM_HOST_WAKER:
lpm_test_host_waker(test_ch);
break;
case SDIO_TEST_HOST_SENDER_NO_LP:
sender_no_loopback_test(test_ch);
break;
case SDIO_TEST_LPM_RANDOM:
lpm_continuous_rand_test(test_ch);
break;
case SDIO_TEST_RTT:
a2_rtt_test(test_ch);
break;
case SDIO_TEST_CLOSE_CHANNEL:
if (test_ch->ch_id != SDIO_SMEM)
open_close_test(test_ch);
break;
case SDIO_TEST_MODEM_RESET:
modem_reset_test(test_ch);
break;
case SDIO_TEST_A2_VALIDATION:
a2_validation_test(test_ch);
break;
default:
pr_err(TEST_MODULE_NAME ":Bad Test type = %d.\n",
(int) test_type);
}
}
/**
* Notification Callback
*
* Notify the worker
*
*/
static void notify(void *priv, unsigned channel_event)
{
struct test_channel *test_ch = (struct test_channel *) priv;
pr_debug(TEST_MODULE_NAME ": %s - notify event=%d.\n",
__func__, channel_event);
if (test_ch->ch == NULL) {
pr_info(TEST_MODULE_NAME ": %s - notify before ch ready.\n",
__func__);
return;
}
switch (channel_event) {
case SDIO_EVENT_DATA_READ_AVAIL:
atomic_inc(&test_ch->rx_notify_count);
atomic_set(&test_ch->any_notify_count, 1);
TEST_DBG(TEST_MODULE_NAME ": %s - SDIO_EVENT_DATA_READ_AVAIL, "
"any_notify_count=%d, rx_notify_count=%d\n",
__func__,
atomic_read(&test_ch->any_notify_count),
atomic_read(&test_ch->rx_notify_count));
/*
* When there is pending data on a channel, turn on the bit in the
* mask that indicates pending data for that channel on that device.
*/
if (test_ch->test_device != NULL &&
test_ch->test_type == SDIO_TEST_LPM_RANDOM) {
spin_lock_irqsave(&test_ch->test_device->lpm_array_lock,
test_ch->test_device->
lpm_array_lock_flags);
test_ch->test_device->read_avail_mask |=
test_ch->channel_mask_id;
test_ch->notify_counter_per_chan++;
lpm_test_update_entry(test_ch, LPM_NOTIFY, "NOTIFY", 0);
spin_unlock_irqrestore(&test_ch->test_device->
lpm_array_lock,
test_ch->test_device->
lpm_array_lock_flags);
}
break;
case SDIO_EVENT_DATA_WRITE_AVAIL:
atomic_inc(&test_ch->tx_notify_count);
atomic_set(&test_ch->any_notify_count, 1);
TEST_DBG(TEST_MODULE_NAME ": %s - SDIO_EVENT_DATA_WRITE_AVAIL, "
"any_notify_count=%d, tx_notify_count=%d\n",
__func__,
atomic_read(&test_ch->any_notify_count),
atomic_read(&test_ch->tx_notify_count));
break;
default:
BUG();
}
wake_up(&test_ch->wait_q);
}
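/*
 * Note (added for clarity, not part of the original flow description): the
 * test loops consume these counters in two ways. Tests that wait for either
 * direction, such as a2_validation_test(), sleep on any_notify_count and
 * clear it with atomic_set(), while the pure sender tests sleep on
 * tx_notify_count and consume a single event with atomic_dec().
 */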
#ifdef CONFIG_MSM_SDIO_SMEM
static int sdio_smem_test_cb(int event)
{
struct test_channel *tch = test_ctx->test_ch_arr[SDIO_SMEM];
int i;
int *smem_buf = (int *)test_ctx->smem_buf;
uint32_t val = 0;
int ret = 0;
pr_debug(TEST_MODULE_NAME ":%s: Received event %d\n", __func__, event);
if (!tch) {
pr_err(TEST_MODULE_NAME ": %s NULL tch\n", __func__);
return -EINVAL;
}
switch (event) {
case SDIO_SMEM_EVENT_READ_DONE:
tch->rx_bytes += SMEM_MAX_XFER_SIZE;
for (i = 0; i < SMEM_MAX_XFER_SIZE;) {
val = (int)*smem_buf;
if ((val != test_ctx->smem_counter) && tch->is_used) {
pr_err(TEST_MODULE_NAME ":%s: Invalid value %d "
"expected %d in smem arr",
__func__, val, test_ctx->smem_counter);
pr_err(TEST_MODULE_NAME ":SMEM test FAILED\n");
tch->test_completed = 1;
tch->test_result = TEST_FAILED;
check_test_completion();
ret = -EINVAL;
goto exit;
}
i += 4;
smem_buf++;
test_ctx->smem_counter++;
}
if (tch->rx_bytes >= 40000000) {
if ((!tch->test_completed) && tch->is_used) {
pr_info(TEST_MODULE_NAME ":SMEM test PASSED\n");
tch->test_completed = 1;
tch->test_result = TEST_PASSED;
check_test_completion();
}
}
break;
case SDIO_SMEM_EVENT_READ_ERR:
if (tch->is_used) {
pr_err(TEST_MODULE_NAME ":Read overflow, "
"SMEM test FAILED\n");
tch->test_completed = 1;
tch->test_result = TEST_FAILED;
ret = -EIO;
}
break;
default:
if (tch->is_used) {
pr_err(TEST_MODULE_NAME ":Unhandled event %d\n", event);
ret = -EINVAL;
}
break;
}
exit:
return ret;
}
static int sdio_smem_open(struct sdio_smem_client *sdio_smem)
{
int ret = 0;
if (!sdio_smem) {
pr_info(TEST_MODULE_NAME "%s: NULL sdio_smem_client\n",
__func__);
return -EINVAL;
}
if (test_ctx->test_ch_arr[SDIO_SMEM]->ch_ready) {
pr_info(TEST_MODULE_NAME "%s: SDIO_SMEM channel is already opened\n",
__func__);
return 0;
}
test_ctx->test_ch_arr[SDIO_SMEM]->ch_ready = 1;
sdio_smem->buf = test_ctx->smem_buf;
sdio_smem->size = SMEM_MAX_XFER_SIZE;
sdio_smem->cb_func = sdio_smem_test_cb;
ret = sdio_smem_register_client();
if (ret)
pr_info(TEST_MODULE_NAME "%s: Error (%d) registering sdio_smem "
"test client\n",
__func__, ret);
return ret;
}
static int sdio_smem_test_probe(struct platform_device *pdev)
{
test_ctx->sdio_smem = container_of(pdev, struct sdio_smem_client,
plat_dev);
return sdio_smem_open(test_ctx->sdio_smem);
}
static struct platform_driver sdio_smem_client_drv = {
.probe = sdio_smem_test_probe,
.driver = {
.name = "SDIO_SMEM_CLIENT",
.owner = THIS_MODULE,
},
};
#endif
static void sdio_test_lpm_timeout_handler(unsigned long data)
{
struct test_channel *tch = (struct test_channel *)data;
pr_info(TEST_MODULE_NAME ": %s - LPM TEST TIMEOUT Expired after "
"%d ms\n", __func__, tch->timeout_ms);
pr_info(TEST_MODULE_NAME ": %s - tch->test_result = TEST_FAILED\n",
__func__);
tch->test_completed = 1;
tch->test_result = TEST_FAILED;
check_test_completion();
return;
}
static void sdio_test_lpm_timer_handler(unsigned long data)
{
struct test_channel *tch = (struct test_channel *)data;
/* check tch before it is dereferenced in the log message below */
if (!tch) {
pr_err(TEST_MODULE_NAME ": %s - LPM TEST FAILED. "
"tch is NULL\n", __func__);
return;
}
pr_info(TEST_MODULE_NAME ": %s - LPM TEST Timer Expired after "
"%d ms\n", __func__, tch->timer_interval_ms);
if (!tch->ch) {
pr_err(TEST_MODULE_NAME ": %s - LPM TEST FAILED. tch->ch "
"is NULL\n", __func__);
tch->test_result = TEST_FAILED;
return;
}
/* Verify that we voted for sleep */
if (tch->is_ok_to_sleep) {
tch->test_result = TEST_PASSED;
pr_info(TEST_MODULE_NAME ": %s - 8K voted for sleep\n",
__func__);
} else {
tch->test_result = TEST_FAILED;
pr_info(TEST_MODULE_NAME ": %s - 8K voted against sleep\n",
__func__);
}
sdio_al_unregister_lpm_cb(tch->sdio_al_device);
if (tch->test_type == SDIO_TEST_LPM_HOST_WAKER) {
atomic_set(&tch->wakeup_client, 1);
wake_up(&tch->wait_q);
}
}
int sdio_test_wakeup_callback(void *device_handle, int is_vote_for_sleep)
{
int i = 0;
TEST_DBG(TEST_MODULE_NAME ": %s is_vote_for_sleep=%d!!!",
__func__, is_vote_for_sleep);
for (i = 0; i < SDIO_MAX_CHANNELS; i++) {
struct test_channel *tch = test_ctx->test_ch_arr[i];
if ((!tch) || (!tch->is_used) || (!tch->ch_ready))
continue;
if (tch->sdio_al_device == device_handle) {
tch->is_ok_to_sleep = is_vote_for_sleep;
if (tch->test_type == SDIO_TEST_LPM_RANDOM) {
spin_lock_irqsave(&tch->test_device->
lpm_array_lock,
tch->test_device->
lpm_array_lock_flags);
if (is_vote_for_sleep == 1)
lpm_test_update_entry(tch,
LPM_SLEEP,
"SLEEP ", 0);
else
lpm_test_update_entry(tch,
LPM_WAKEUP,
"WAKEUP", 0);
spin_unlock_irqrestore(&tch->test_device->
lpm_array_lock,
tch->test_device->
lpm_array_lock_flags);
break;
}
}
}
return 0;
}
static int sdio_test_find_dev(struct test_channel *tch)
{
int j;
int null_index = -1;
for (j = 0 ; j < MAX_NUM_OF_SDIO_DEVICES; ++j) {
struct sdio_test_device *test_dev =
&test_ctx->test_dev_arr[j];
if (test_dev->sdio_al_device == NULL) {
if (null_index == -1)
null_index = j;
continue;
}
if (test_dev->sdio_al_device ==
tch->ch->sdio_al_dev) {
test_dev->open_channels_counter_to_recv++;
test_dev->open_channels_counter_to_send++;
tch->test_device = test_dev;
/* set the mask id used to flag pending
data for this channel */
tch->channel_mask_id = test_dev->next_mask_id;
test_dev->next_mask_id *= 2;
pr_info(TEST_MODULE_NAME ": %s - channel %s "
"got read_mask_id = 0x%x. device "
"next_mask_id=0x%x",
__func__, tch->name, tch->channel_mask_id,
test_dev->next_mask_id);
break;
}
}
/*
* Happens once a new device is "discovered" while testing, i.e. when
* testing several channels, a new device is "discovered" once the
* first channel of that device is tested
*/
if (j == MAX_NUM_OF_SDIO_DEVICES) {
struct sdio_test_device *test_dev =
&test_ctx->
test_dev_arr[null_index];
test_dev->sdio_al_device =
tch->ch->sdio_al_dev;
test_ctx->number_of_active_devices++;
test_ctx->max_number_of_devices++;
test_dev->open_channels_counter_to_recv++;
test_dev->open_channels_counter_to_send++;
test_dev->next_avail_entry_in_array = 0;
tch->test_device = test_dev;
tch->test_device->array_size =
LPM_ARRAY_SIZE;
test_dev->modem_result_per_dev = 1;
tch->modem_result_per_chan = 0;
test_dev->next_avail_entry_in_array = 0;
spin_lock_init(&test_dev->
lpm_array_lock);
if (tch->test_type == SDIO_TEST_LPM_RANDOM) {
pr_err(MODULE_NAME ": %s - "
"Allocating Msg Array for "
"Maximum open channels for device (%d) "
"Channels. Array has %d entries",
__func__,
LPM_MAX_OPEN_CHAN_PER_DEV,
test_dev->array_size);
test_dev->lpm_arr =
kzalloc(sizeof(
struct lpm_entry_type) *
tch->
test_device->array_size,
GFP_KERNEL);
if (!test_dev->lpm_arr) {
pr_err(MODULE_NAME ": %s - "
"lpm_arr is NULL",
__func__);
return -ENOMEM;
}
}
/*
* For a new device, initialize next_mask_id and assign a
* mask_id to the channel
*/
test_dev->next_mask_id = 0x1;
tch->channel_mask_id = test_dev->next_mask_id;
test_dev->next_mask_id *= 2;
pr_info(TEST_MODULE_NAME ": %s - channel %s got "
"read_mask_id = 0x%x. device next_mask_id=0x%x",
__func__,
tch->name,
tch->channel_mask_id,
test_dev->next_mask_id);
}
return 0;
}
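/*
 * Illustrative mask assignment (assuming three LPM channels open on the
 * same device): the first channel gets channel_mask_id 0x1, the second
 * 0x2 and the third 0x4, since next_mask_id starts at 0x1 and is doubled
 * per channel. notify() later ORs the channel's mask into the device's
 * read_avail_mask whenever read data is pending.
 */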
static void check_test_result(void)
{
int result = 1;
int i = 0;
test_ctx->max_number_of_devices = 0;
pr_info(TEST_MODULE_NAME ": %s - Woke Up\n", __func__);
for (i = 0; i < SDIO_MAX_CHANNELS; i++) {
struct test_channel *tch = test_ctx->test_ch_arr[i];
if ((!tch) || (!tch->is_used) || (!tch->ch_ready))
continue;
if (tch->test_type == SDIO_TEST_LPM_RANDOM)
result &= tch->test_device->final_result_per_dev;
else
if (tch->test_result == TEST_FAILED) {
pr_info(TEST_MODULE_NAME ": %s - "
"Test FAILED\n", __func__);
test_ctx->test_result = TEST_FAILED;
pr_err(TEST_MODULE_NAME ": %s - "
"test_result %d",
__func__, test_ctx->test_result);
return;
}
}
if (result == 0) {
pr_info(TEST_MODULE_NAME ": %s - Test FAILED\n", __func__);
test_ctx->test_result = TEST_FAILED;
pr_err(TEST_MODULE_NAME ": %s - "
"test_result %d",
__func__, test_ctx->test_result);
return;
}
pr_info(TEST_MODULE_NAME ": %s - Test PASSED", __func__);
test_ctx->test_result = TEST_PASSED;
pr_err(TEST_MODULE_NAME ": %s - "
"test_result %d",
__func__, test_ctx->test_result);
return;
}
/**
* Test Main
*/
static int test_start(void)
{
int ret = -ENOMEM;
int i;
pr_debug(TEST_MODULE_NAME ":Starting Test ....\n");
test_ctx->test_completed = 0;
test_ctx->test_result = TEST_NO_RESULT;
test_ctx->debug.dun_throughput = 0;
test_ctx->debug.rmnt_throughput = 0;
test_ctx->number_of_active_devices = 0;
pr_err(TEST_MODULE_NAME ": %s - test_result %d",
__func__, test_ctx->test_result);
memset(test_ctx->test_dev_arr, 0,
sizeof(struct sdio_test_device)*MAX_NUM_OF_SDIO_DEVICES);
/* Open The Channels */
for (i = 0; i < SDIO_MAX_CHANNELS; i++) {
struct test_channel *tch = test_ctx->test_ch_arr[i];
if ((!tch) || (!tch->is_used))
continue;
tch->rx_bytes = 0;
tch->tx_bytes = 0;
atomic_set(&tch->tx_notify_count, 0);
atomic_set(&tch->rx_notify_count, 0);
atomic_set(&tch->any_notify_count, 0);
atomic_set(&tch->wakeup_client, 0);
/* in case there are values left from previous tests */
tch->notify_counter_per_chan = 0;
tch->next_index_in_sent_msg_per_chan = 0;
memset(tch->buf, 0x00, tch->buf_size);
tch->test_result = TEST_NO_RESULT;
tch->test_completed = 0;
ret = open_sdio_ch(tch);
if (ret)
continue;
if (tch->ch_id != SDIO_SMEM) {
ret = sdio_test_find_dev(tch);
if (ret) {
pr_err(TEST_MODULE_NAME ": %s - "
"sdio_test_find_dev() returned with "
"error", __func__);
return -ENODEV;
}
tch->sdio_al_device = tch->ch->sdio_al_dev;
}
if ((tch->test_type == SDIO_TEST_LPM_HOST_WAKER) ||
(tch->test_type == SDIO_TEST_LPM_CLIENT_WAKER) ||
(tch->test_type == SDIO_TEST_LPM_RANDOM))
sdio_al_register_lpm_cb(tch->sdio_al_device,
sdio_test_wakeup_callback);
}
/*
* make some space between opening the channels and sending the
* config messages
*/
msleep(100);
/*
* try to delay send_config_msg of all channels to after the point
* when we open them all
*/
for (i = 0; i < SDIO_MAX_CHANNELS; i++) {
struct test_channel *tch = test_ctx->test_ch_arr[i];
if ((!tch) || (!tch->is_used))
continue;
if ((tch->ch_ready) && (tch->ch_id != SDIO_SMEM))
send_config_msg(tch);
if ((tch->test_type == SDIO_TEST_LPM_HOST_WAKER) ||
(tch->test_type == SDIO_TEST_LPM_CLIENT_WAKER) ||
(tch->test_type == SDIO_TEST_LPM_RANDOM)) {
if (tch->timer_interval_ms > 0) {
pr_info(TEST_MODULE_NAME ": %s - init timer, "
"ms=%d\n",
__func__, tch->timer_interval_ms);
init_timer(&tch->timer);
tch->timer.data = (unsigned long)tch;
tch->timer.function =
sdio_test_lpm_timer_handler;
tch->timer.expires = jiffies +
msecs_to_jiffies(tch->timer_interval_ms);
add_timer(&tch->timer);
}
}
}
pr_debug(TEST_MODULE_NAME ":queue_work..\n");
for (i = 0; i < SDIO_MAX_CHANNELS; i++) {
struct test_channel *tch = test_ctx->test_ch_arr[i];
if ((!tch) || (!tch->is_used) || (!tch->ch_ready))
continue;
if (tch->ch_id == SDIO_SMEM) {
#ifdef CONFIG_MSM_SDIO_SMEM
if (tch->test_type == SDIO_TEST_CLOSE_CHANNEL)
open_close_smem_test(tch);
#endif
} else {
queue_work(tch->workqueue, &tch->test_work.work);
}
}
pr_info(TEST_MODULE_NAME ": %s - Waiting for the test completion\n",
__func__);
wait_event(test_ctx->wait_q, test_ctx->test_completed);
check_test_result();
/*
* Close the channels and zero the is_used flag so that if the modem
* will be reset after the test completion we won't re-open
* the channels
*/
for (i = 0; i < SDIO_MAX_CHANNELS; i++) {
struct test_channel *tch = test_ctx->test_ch_arr[i];
if ((!tch) || (!tch->is_used))
continue;
if (!tch->ch_ready) {
tch->is_used = 0;
continue;
}
close_sdio_ch(tch);
tch->is_used = 0;
}
if (test_ctx->test_result == TEST_PASSED)
return 0;
else
return -EINVAL;
}
static int set_params_loopback_9k(struct test_channel *tch)
{
if (!tch) {
pr_err(TEST_MODULE_NAME ":NULL channel\n");
return -EINVAL;
}
tch->is_used = 1;
tch->test_type = SDIO_TEST_LOOPBACK_CLIENT;
tch->config_msg.signature = TEST_CONFIG_SIGNATURE;
tch->config_msg.test_case = SDIO_TEST_LOOPBACK_CLIENT;
tch->config_msg.num_packets = 10000;
tch->config_msg.num_iterations = 1;
tch->packet_length = 512;
if (tch->ch_id == SDIO_RPC)
tch->packet_length = 128;
tch->timer_interval_ms = 0;
return 0;
}
static int set_params_loopback_9k_close(struct test_channel *tch)
{
if (!tch) {
pr_err(TEST_MODULE_NAME ":NULL channel\n");
return -EINVAL;
}
tch->is_used = 1;
tch->test_type = SDIO_TEST_CLOSE_CHANNEL;
tch->config_msg.signature = TEST_CONFIG_SIGNATURE;
tch->config_msg.test_case = SDIO_TEST_LOOPBACK_CLIENT;
tch->config_msg.num_packets = 5000;
tch->config_msg.num_iterations = 1;
tch->max_burst_size = 10;
switch (tch->ch_id) {
case SDIO_DUN:
case SDIO_RPC:
tch->packet_length = 128; /* max is 2K*/
break;
case SDIO_DIAG:
case SDIO_RMNT:
default:
tch->packet_length = 512; /* max is 4k */
}
tch->timer_interval_ms = 0;
return 0;
}
static int set_params_a2_perf(struct test_channel *tch)
{
if (!tch) {
pr_err(TEST_MODULE_NAME ":NULL channel\n");
return -EINVAL;
}
tch->is_used = 1;
tch->test_type = SDIO_TEST_PERF;
tch->config_msg.signature = TEST_CONFIG_SIGNATURE;
tch->config_msg.test_case = SDIO_TEST_LOOPBACK_CLIENT;
switch (tch->ch_id) {
case SDIO_DIAG:
tch->packet_length = 512;
break;
case SDIO_DUN:
tch->packet_length = DUN_PACKET_SIZE;
break;
case SDIO_CSVT:
tch->packet_length = CSVT_PACKET_SIZE;
break;
default:
tch->packet_length = MAX_XFER_SIZE;
break;
}
pr_info(TEST_MODULE_NAME ": %s: packet_length=%d", __func__,
tch->packet_length);
tch->config_msg.num_packets = 10000;
tch->config_msg.num_iterations = 1;
tch->random_packet_size = 0;
tch->timer_interval_ms = 0;
return 0;
}
static int set_params_rtt(struct test_channel *tch)
{
if (!tch) {
pr_err(TEST_MODULE_NAME ":NULL channel\n");
return -EINVAL;
}
tch->is_used = 1;
tch->test_type = SDIO_TEST_RTT;
tch->config_msg.signature = TEST_CONFIG_SIGNATURE;
tch->config_msg.test_case = SDIO_TEST_LOOPBACK_CLIENT;
switch (tch->ch_id) {
case SDIO_RMNT:
tch->packet_length = SDIO_RMNT_RTT_PACKET_SIZE;
break;
case SDIO_CSVT:
tch->packet_length = SDIO_CSVT_RTT_PACKET_SIZE;
break;
default:
pr_err(TEST_MODULE_NAME ": %s - ch_id invalid.\n", __func__);
return -EINVAL;
}
pr_info(TEST_MODULE_NAME ": %s: packet_length=%d", __func__,
tch->packet_length);
tch->config_msg.num_packets = 200;
tch->config_msg.num_iterations = 1;
tch->random_packet_size = 0;
tch->timer_interval_ms = 0;
return 0;
}
static int set_params_a2_small_pkts(struct test_channel *tch)
{
if (!tch) {
pr_err(TEST_MODULE_NAME ":NULL channel\n");
return -EINVAL;
}
tch->is_used = 1;
tch->test_type = SDIO_TEST_PERF;
tch->config_msg.signature = TEST_CONFIG_SIGNATURE;
tch->config_msg.test_case = SDIO_TEST_LOOPBACK_CLIENT;
tch->packet_length = 128;
tch->config_msg.num_packets = 1000000;
tch->config_msg.num_iterations = 1;
tch->random_packet_size = 1;
tch->timer_interval_ms = 0;
return 0;
}
static int set_params_modem_reset(struct test_channel *tch)
{
if (!tch) {
pr_err(TEST_MODULE_NAME ":NULL channel\n");
return -EINVAL;
}
tch->is_used = 1;
tch->test_type = SDIO_TEST_MODEM_RESET;
tch->config_msg.signature = TEST_CONFIG_SIGNATURE;
tch->config_msg.test_case = SDIO_TEST_LOOPBACK_CLIENT;
tch->packet_length = 512;
if (tch->ch_id == SDIO_RPC)
tch->packet_length = 128;
else if ((tch->ch_id == SDIO_RMNT) || (tch->ch_id == SDIO_DUN))
tch->packet_length = MAX_XFER_SIZE;
tch->config_msg.num_packets = 50000;
tch->config_msg.num_iterations = 1;
tch->timer_interval_ms = 0;
return 0;
}
static int set_params_a2_validation(struct test_channel *tch)
{
if (!tch) {
pr_err(TEST_MODULE_NAME ":NULL channel\n");
return -EINVAL;
}
tch->is_used = 1;
tch->test_type = SDIO_TEST_A2_VALIDATION;
tch->config_msg.signature = TEST_CONFIG_SIGNATURE;
tch->config_msg.test_case = SDIO_TEST_LOOPBACK_CLIENT;
if (tch->ch_id == SDIO_RMNT)
tch->packet_length = RMNT_PACKET_SIZE;
else if (tch->ch_id == SDIO_DUN)
tch->packet_length = DUN_PACKET_SIZE;
else
tch->packet_length = MAX_XFER_SIZE;
tch->config_msg.num_packets = 10000;
tch->config_msg.num_iterations = 1;
tch->timer_interval_ms = 0;
return 0;
}
static int set_params_smem_test(struct test_channel *tch)
{
if (!tch) {
pr_err(TEST_MODULE_NAME ":NULL channel\n");
return -EINVAL;
}
tch->is_used = 1;
tch->timer_interval_ms = 0;
return 0;
}
static int set_params_lpm_test(struct test_channel *tch,
enum sdio_test_case_type test,
int timer_interval_ms)
{
static int first_time = 1;
if (!tch) {
pr_err(TEST_MODULE_NAME ": %s - NULL channel\n", __func__);
return -EINVAL;
}
tch->is_used = 1;
tch->test_type = test;
tch->config_msg.signature = TEST_CONFIG_SIGNATURE;
tch->config_msg.test_case = test;
tch->config_msg.num_packets = LPM_TEST_NUM_OF_PACKETS;
tch->config_msg.num_iterations = 1;
tch->timer_interval_ms = timer_interval_ms;
tch->timeout_ms = 10000;
tch->packet_length = 0;
if (test != SDIO_TEST_LPM_RANDOM) {
init_timer(&tch->timeout_timer);
tch->timeout_timer.data = (unsigned long)tch;
tch->timeout_timer.function = sdio_test_lpm_timeout_handler;
tch->timeout_timer.expires = jiffies +
msecs_to_jiffies(tch->timeout_ms);
add_timer(&tch->timeout_timer);
pr_info(TEST_MODULE_NAME ": %s - Initiated LPM TIMEOUT TIMER."
"set to %d ms\n",
__func__, tch->timeout_ms);
}
if (first_time) {
pr_info(TEST_MODULE_NAME ": %s - wake_lock_init() called\n",
__func__);
wake_lock_init(&test_ctx->wake_lock,
WAKE_LOCK_SUSPEND, TEST_MODULE_NAME);
first_time = 0;
}
pr_info(TEST_MODULE_NAME ": %s - wake_lock() for the TEST is "
"called channel %s. to prevent real sleeping\n",
__func__, tch->name);
wake_lock(&test_ctx->wake_lock);
return 0;
}
static int set_params_8k_sender_no_lp(struct test_channel *tch)
{
if (!tch) {
pr_err(TEST_MODULE_NAME ":NULL channel\n");
return -EINVAL;
}
tch->is_used = 1;
tch->test_type = SDIO_TEST_HOST_SENDER_NO_LP;
tch->config_msg.signature = TEST_CONFIG_SIGNATURE;
tch->config_msg.test_case = SDIO_TEST_HOST_SENDER_NO_LP;
tch->config_msg.num_packets = 1000;
tch->config_msg.num_iterations = 1;
tch->packet_length = 512;
if (tch->ch_id == SDIO_RPC)
tch->packet_length = 128;
tch->timer_interval_ms = 0;
return 0;
}
static void set_pseudo_random_seed(void)
{
/* Set the seed according to the kernel command line parameter, if any,
or otherwise derive a pseudo-random value */
if (seed != 0) {
test_ctx->lpm_pseudo_random_seed = seed;
} else {
test_ctx->lpm_pseudo_random_seed =
(unsigned int)(get_jiffies_64() & 0xFFFF);
test_ctx->lpm_pseudo_random_seed =
pseudo_random_seed(&test_ctx->lpm_pseudo_random_seed);
}
pr_info(TEST_MODULE_NAME ":%s: seed is %u",
__func__, test_ctx->lpm_pseudo_random_seed);
}
/*
* For each channel:
* 1. open the channel
* 2. close the channel
*/
static int close_channel_lpm_test(int channel_num)
{
int ret = 0;
struct test_channel *tch = NULL;
tch = test_ctx->test_ch_arr[channel_num];
if (!tch) {
pr_info(TEST_MODULE_NAME ":%s ch#%d is NULL\n",
__func__, channel_num);
return 0;
}
ret = open_sdio_ch(tch);
if (ret) {
pr_err(TEST_MODULE_NAME":%s open channel %s"
" failed\n", __func__, tch->name);
return ret;
} else {
pr_info(TEST_MODULE_NAME":%s open channel %s"
" success\n", __func__, tch->name);
}
ret = close_sdio_ch(tch);
if (ret) {
pr_err(TEST_MODULE_NAME":%s close channel %s"
" failed\n", __func__, tch->name);
return ret;
} else {
pr_info(TEST_MODULE_NAME":%s close channel %s"
" success\n", __func__, tch->name);
}
tch->is_used = 0;
return ret;
}
/**
* Write File.
*
* @note Trigger the test from user space by:
* echo 1 > /dev/sdio_al_test
*
*/
ssize_t test_write(struct file *filp, const char __user *buf, size_t size,
loff_t *f_pos)
{
sdio_al_test_initial_dev_and_chan(test_ctx);
if (strict_strtol(buf, 10, &test_ctx->testcase))
return -EINVAL;
switch (test_ctx->testcase) {
case 98:
pr_info(TEST_MODULE_NAME " set runtime debug on");
test_ctx->runtime_debug = 1;
return size;
case 99:
pr_info(TEST_MODULE_NAME " set runtime debug off");
test_ctx->runtime_debug = 0;
return size;
default:
pr_info(TEST_MODULE_NAME ":Bad Test number = %d.\n",
(int)test_ctx->testcase);
return size;
}
return size;
}
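/*
 * Usage sketch (based on the switch above): besides selecting a test case,
 * the same device node toggles runtime debug:
 *
 *   echo 98 > /dev/sdio_al_test    runtime debug on
 *   echo 99 > /dev/sdio_al_test    runtime debug off
 */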
/**
* Test Channel Init.
*/
int test_channel_init(char *name)
{
struct test_channel *test_ch;
int ch_id = 0;
int ret;
pr_debug(TEST_MODULE_NAME ":%s.\n", __func__);
pr_info(TEST_MODULE_NAME ": init test channel %s.\n", name);
ch_id = channel_name_to_id(name);
pr_debug(TEST_MODULE_NAME ":id = %d.\n", ch_id);
if (test_ctx->test_ch_arr[ch_id] == NULL) {
test_ch = kzalloc(sizeof(*test_ch), GFP_KERNEL);
if (test_ch == NULL) {
pr_err(TEST_MODULE_NAME ":kzalloc err for allocating "
"test_ch %s.\n",
name);
return -ENOMEM;
}
test_ctx->test_ch_arr[ch_id] = test_ch;
test_ch->ch_id = ch_id;
strncpy(test_ch->name, name,
strnlen(name, TEST_CH_NAME_SIZE)-SDIO_TEST_POSTFIX_SIZE);
test_ch->buf_size = MAX_XFER_SIZE;
test_ch->buf = kzalloc(test_ch->buf_size, GFP_KERNEL);
if (test_ch->buf == NULL) {
kfree(test_ch);
test_ctx->test_ch_arr[ch_id] = NULL;
return -ENOMEM;
}
if (test_ch->ch_id == SDIO_SMEM) {
test_ctx->smem_buf = kzalloc(SMEM_MAX_XFER_SIZE,
GFP_KERNEL);
if (test_ctx->smem_buf == NULL) {
pr_err(TEST_MODULE_NAME ":%s: Unable to "
"allocate smem buf\n",
__func__);
kfree(test_ch);
test_ctx->test_ch_arr[ch_id] = NULL;
return -ENOMEM;
}
#ifdef CONFIG_MSM_SDIO_SMEM
ret = platform_driver_register(&sdio_smem_client_drv);
if (ret) {
pr_err(TEST_MODULE_NAME ":%s: Unable to "
"register sdio smem "
"test client\n",
__func__);
return ret;
}
#endif
} else {
test_ch->workqueue =
create_singlethread_workqueue(test_ch->name);
test_ch->test_work.test_ch = test_ch;
INIT_WORK(&test_ch->test_work.work, worker);
init_waitqueue_head(&test_ch->wait_q);
}
} else {
test_ch = test_ctx->test_ch_arr[ch_id];
pr_info(TEST_MODULE_NAME ":%s: ch %s was detected again\n",
__func__, test_ch->name);
test_ch->card_removed = 0;
if ((test_ch->is_used) &&
(test_ch->test_type == SDIO_TEST_MODEM_RESET)) {
if (test_ch->ch_id == SDIO_SMEM) {
#ifdef CONFIG_MSM_SDIO_SMEM
ret = add_sdio_smem();
if (ret) {
test_ch->ch_ready = false;
return 0;
}
#endif
} else {
ret = open_sdio_ch(test_ch);
if (ret) {
pr_info(TEST_MODULE_NAME
":%s: open channel %s failed\n",
__func__, test_ch->name);
return 0;
}
ret = sdio_test_find_dev(test_ch);
if (ret) {
pr_err(TEST_MODULE_NAME ": %s - "
"sdio_test_find_dev() returned "
"with error", __func__);
return -ENODEV;
}
test_ch->sdio_al_device =
test_ch->ch->sdio_al_dev;
}
atomic_set(&test_ch->card_detected_event, 1);
wake_up(&test_ch->wait_q);
}
}
return 0;
}
static int sdio_test_channel_probe(struct platform_device *pdev)
{
if (!pdev)
return -EIO;
return test_channel_init((char *)pdev->name);
}
static int sdio_test_channel_remove(struct platform_device *pdev)
{
int ch_id;
if (!pdev)
return -EIO;
ch_id = channel_name_to_id((char *)pdev->name);
if (test_ctx->test_ch_arr[ch_id] == NULL)
return 0;
pr_info(TEST_MODULE_NAME "%s: remove ch %s\n",
__func__, test_ctx->test_ch_arr[ch_id]->name);
if ((ch_id == SDIO_SMEM) && (test_ctx->smem_pdev)) {
platform_device_unregister(test_ctx->smem_pdev);
test_ctx->smem_pdev = NULL;
}
test_ctx->test_ch_arr[ch_id]->ch_ready = 0;
test_ctx->test_ch_arr[ch_id]->card_removed = 1;
return 0;
}
static int sdio_test_channel_csvt_probe(struct platform_device *pdev)
{
int ret = 0;
if (!pdev)
return -ENODEV;
test_ctx->csvt_app_pdev = platform_device_alloc("SDIO_CSVT_TEST_APP",
-1);
ret = platform_device_add(test_ctx->csvt_app_pdev);
if (ret) {
pr_err(MODULE_NAME ":platform_device_add failed, "
"ret=%d\n", ret);
return ret;
}
return sdio_test_channel_probe(pdev);
}
static int sdio_test_channel_csvt_remove(struct platform_device *pdev)
{
if (!pdev)
return -ENODEV;
platform_device_unregister(test_ctx->csvt_app_pdev);
return sdio_test_channel_remove(pdev);
}
static struct platform_driver sdio_rpc_drv = {
.probe = sdio_test_channel_probe,
.remove = sdio_test_channel_remove,
.driver = {
.name = "SDIO_RPC_TEST",
.owner = THIS_MODULE,
},
};
static struct platform_driver sdio_qmi_drv = {
.probe = sdio_test_channel_probe,
.remove = sdio_test_channel_remove,
.driver = {
.name = "SDIO_QMI_TEST",
.owner = THIS_MODULE,
},
};
static struct platform_driver sdio_diag_drv = {
.probe = sdio_test_channel_probe,
.remove = sdio_test_channel_remove,
.driver = {
.name = "SDIO_DIAG_TEST",
.owner = THIS_MODULE,
},
};
static struct platform_driver sdio_smem_drv = {
.probe = sdio_test_channel_probe,
.remove = sdio_test_channel_remove,
.driver = {
.name = "SDIO_SMEM_TEST",
.owner = THIS_MODULE,
},
};
static struct platform_driver sdio_rmnt_drv = {
.probe = sdio_test_channel_probe,
.remove = sdio_test_channel_remove,
.driver = {
.name = "SDIO_RMNT_TEST",
.owner = THIS_MODULE,
},
};
static struct platform_driver sdio_dun_drv = {
.probe = sdio_test_channel_probe,
.remove = sdio_test_channel_remove,
.driver = {
.name = "SDIO_DUN_TEST",
.owner = THIS_MODULE,
},
};
static struct platform_driver sdio_csvt_drv = {
.probe = sdio_test_channel_csvt_probe,
.remove = sdio_test_channel_csvt_remove,
.driver = {
.name = "SDIO_CSVT_TEST",
.owner = THIS_MODULE,
},
};
static struct class *test_class;
const struct file_operations test_fops = {
.owner = THIS_MODULE,
.write = test_write,
};
/**
* Module Init.
*/
static int __init test_init(void)
{
int ret;
pr_debug(TEST_MODULE_NAME ":test_init.\n");
test_ctx = kzalloc(sizeof(struct test_context), GFP_KERNEL);
if (test_ctx == NULL) {
pr_err(TEST_MODULE_NAME ":kzalloc err.\n");
return -ENOMEM;
}
test_ctx->test_ch = NULL;
test_ctx->signature = TEST_SIGNATURE;
test_ctx->name = "UNKNOWN";
init_waitqueue_head(&test_ctx->wait_q);
#ifdef CONFIG_DEBUG_FS
sdio_al_test_debugfs_init();
#endif
test_class = class_create(THIS_MODULE, TEST_MODULE_NAME);
ret = alloc_chrdev_region(&test_ctx->dev_num, 0, 1, TEST_MODULE_NAME);
if (ret) {
pr_err(TEST_MODULE_NAME "alloc_chrdev_region err.\n");
return -ENODEV;
}
test_ctx->dev = device_create(test_class, NULL, test_ctx->dev_num,
test_ctx, TEST_MODULE_NAME);
if (IS_ERR(test_ctx->dev)) {
pr_err(TEST_MODULE_NAME ":device_create err.\n");
return -ENODEV;
}
test_ctx->cdev = cdev_alloc();
if (test_ctx->cdev == NULL) {
pr_err(TEST_MODULE_NAME ":cdev_alloc err.\n");
return -ENODEV;
}
cdev_init(test_ctx->cdev, &test_fops);
test_ctx->cdev->owner = THIS_MODULE;
ret = cdev_add(test_ctx->cdev, test_ctx->dev_num, 1);
if (ret)
pr_err(TEST_MODULE_NAME ":cdev_add err=%d\n", -ret);
else
pr_debug(TEST_MODULE_NAME ":SDIO-AL-Test init OK..\n");
platform_driver_register(&sdio_rpc_drv);
platform_driver_register(&sdio_qmi_drv);
platform_driver_register(&sdio_diag_drv);
platform_driver_register(&sdio_smem_drv);
platform_driver_register(&sdio_rmnt_drv);
platform_driver_register(&sdio_dun_drv);
platform_driver_register(&sdio_csvt_drv);
return ret;
}
/**
* Module Exit.
*/
static void __exit test_exit(void)
{
int i;
pr_debug(TEST_MODULE_NAME ":test_exit.\n");
test_ctx->exit_flag = true;
msleep(100); /* allow gracefully exit of the worker thread */
cdev_del(test_ctx->cdev);
device_destroy(test_class, test_ctx->dev_num);
unregister_chrdev_region(test_ctx->dev_num, 1);
platform_driver_unregister(&sdio_rpc_drv);
platform_driver_unregister(&sdio_qmi_drv);
platform_driver_unregister(&sdio_diag_drv);
platform_driver_unregister(&sdio_smem_drv);
platform_driver_unregister(&sdio_rmnt_drv);
platform_driver_unregister(&sdio_dun_drv);
platform_driver_unregister(&sdio_csvt_drv);
for (i = 0; i < SDIO_MAX_CHANNELS; i++) {
struct test_channel *tch = test_ctx->test_ch_arr[i];
if (!tch)
continue;
kfree(tch->buf);
kfree(tch);
}
#ifdef CONFIG_DEBUG_FS
sdio_al_test_debugfs_cleanup();
#endif
kfree(test_ctx);
pr_debug(TEST_MODULE_NAME ":test_exit complete.\n");
}
module_init(test_init);
module_exit(test_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SDIO_AL Test");
MODULE_AUTHOR("Amir Samuelov <amirs@codeaurora.org>");
| gpl-2.0 |
TeamGlade-Devices/android_kernel_htc_pico | drivers/s390/cio/css.c | 2515 | 29170 | /*
* driver for channel subsystem
*
* Copyright IBM Corp. 2002, 2010
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>
#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"
int css_init_done = 0;
int max_ssid;
struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
static struct bus_type css_bus_type;
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
struct subchannel_id schid;
int ret;
init_subchannel_id(&schid);
ret = -ENODEV;
do {
do {
ret = fn(schid, data);
if (ret)
break;
} while (schid.sch_no++ < __MAX_SUBCHANNEL);
schid.sch_no = 0;
} while (schid.ssid++ < max_ssid);
return ret;
}
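/*
 * Note (added for clarity): the nested loops visit every subchannel number
 * for each ssid from 0 to max_ssid; a non-zero return from fn() ends the
 * scan of the current ssid, and when MSS is not enabled max_ssid stays 0,
 * so in that case it ends the whole walk.
 */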
struct cb_data {
void *data;
struct idset *set;
int (*fn_known_sch)(struct subchannel *, void *);
int (*fn_unknown_sch)(struct subchannel_id, void *);
};
static int call_fn_known_sch(struct device *dev, void *data)
{
struct subchannel *sch = to_subchannel(dev);
struct cb_data *cb = data;
int rc = 0;
idset_sch_del(cb->set, sch->schid);
if (cb->fn_known_sch)
rc = cb->fn_known_sch(sch, cb->data);
return rc;
}
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
struct cb_data *cb = data;
int rc = 0;
if (idset_sch_contains(cb->set, schid))
rc = cb->fn_unknown_sch(schid, cb->data);
return rc;
}
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
struct cb_data *cb = data;
struct subchannel *sch;
int rc = 0;
sch = get_subchannel_by_schid(schid);
if (sch) {
if (cb->fn_known_sch)
rc = cb->fn_known_sch(sch, cb->data);
put_device(&sch->dev);
} else {
if (cb->fn_unknown_sch)
rc = cb->fn_unknown_sch(schid, cb->data);
}
return rc;
}
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
int (*fn_unknown)(struct subchannel_id,
void *), void *data)
{
struct cb_data cb;
int rc;
cb.data = data;
cb.fn_known_sch = fn_known;
cb.fn_unknown_sch = fn_unknown;
cb.set = idset_sch_new();
if (!cb.set)
/* fall back to brute force scanning in case of oom */
return for_each_subchannel(call_fn_all_sch, &cb);
idset_fill(cb.set);
/* Process registered subchannels. */
rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
if (rc)
goto out;
/* Process unregistered subchannels. */
if (fn_unknown)
rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
idset_free(cb.set);
return rc;
}
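/*
 * Example invocation (taken from the slow path handling further below):
 *
 *	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
 *				   NULL);
 *
 * Registered subchannels are visited through the driver core first and
 * removed from the idset, so the brute-force pass only sees ids that no
 * registered device claimed.
 */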
static void css_sch_todo(struct work_struct *work);
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
struct subchannel *sch;
int ret;
sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
if (sch == NULL)
return ERR_PTR(-ENOMEM);
ret = cio_validate_subchannel (sch, schid);
if (ret < 0) {
kfree(sch);
return ERR_PTR(ret);
}
INIT_WORK(&sch->todo_work, css_sch_todo);
return sch;
}
static void
css_subchannel_release(struct device *dev)
{
struct subchannel *sch;
sch = to_subchannel(dev);
if (!cio_is_console(sch->schid)) {
/* Reset intparm to zeroes. */
sch->config.intparm = 0;
cio_commit_config(sch);
kfree(sch->lock);
kfree(sch);
}
}
static int css_sch_device_register(struct subchannel *sch)
{
int ret;
mutex_lock(&sch->reg_mutex);
dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
sch->schid.sch_no);
ret = device_register(&sch->dev);
mutex_unlock(&sch->reg_mutex);
return ret;
}
/**
* css_sch_device_unregister - unregister a subchannel
* @sch: subchannel to be unregistered
*/
void css_sch_device_unregister(struct subchannel *sch)
{
mutex_lock(&sch->reg_mutex);
if (device_is_registered(&sch->dev))
device_unregister(&sch->dev);
mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
static void css_sch_todo(struct work_struct *work)
{
struct subchannel *sch;
enum sch_todo todo;
sch = container_of(work, struct subchannel, todo_work);
/* Find out todo. */
spin_lock_irq(sch->lock);
todo = sch->todo;
CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
sch->schid.sch_no, todo);
sch->todo = SCH_TODO_NOTHING;
spin_unlock_irq(sch->lock);
/* Perform todo. */
if (todo == SCH_TODO_UNREG)
css_sch_device_unregister(sch);
/* Release workqueue ref. */
put_device(&sch->dev);
}
/**
* css_sched_sch_todo - schedule a subchannel operation
* @sch: subchannel
* @todo: todo
*
* Schedule the operation identified by @todo to be performed on the slow path
* workqueue. Do nothing if another operation with higher priority is already
* scheduled. Needs to be called with subchannel lock held.
*/
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
sch->schid.ssid, sch->schid.sch_no, todo);
if (sch->todo >= todo)
return;
/* Get workqueue ref. */
if (!get_device(&sch->dev))
return;
sch->todo = todo;
if (!queue_work(cio_work_q, &sch->todo_work)) {
/* Already queued, release workqueue ref. */
put_device(&sch->dev);
}
}
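/*
 * Illustrative caller sketch (hypothetical, mirroring the locking rule in
 * the kernel-doc above): a driver that wants a subchannel unregistered on
 * the slow path would do
 *
 *	spin_lock_irq(sch->lock);
 *	css_sched_sch_todo(sch, SCH_TODO_UNREG);
 *	spin_unlock_irq(sch->lock);
 *
 * css_sch_todo() then runs on cio_work_q and performs the unregister.
 */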
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
int i;
int mask;
memset(ssd, 0, sizeof(struct chsc_ssd_info));
ssd->path_mask = pmcw->pim;
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (pmcw->pim & mask) {
chp_id_init(&ssd->chpid[i]);
ssd->chpid[i].id = pmcw->chpid[i];
}
}
}
static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
int i;
int mask;
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (ssd->path_mask & mask)
if (!chp_is_registered(ssd->chpid[i]))
chp_new(ssd->chpid[i]);
}
}
void css_update_ssd_info(struct subchannel *sch)
{
int ret;
if (cio_is_console(sch->schid)) {
/* Console is initialized too early for functions requiring
* memory allocation. */
ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
} else {
ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
if (ret)
ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
ssd_register_chpids(&sch->ssd_info);
}
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct subchannel *sch = to_subchannel(dev);
return sprintf(buf, "%01x\n", sch->st);
}
static DEVICE_ATTR(type, 0444, type_show, NULL);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct subchannel *sch = to_subchannel(dev);
return sprintf(buf, "css:t%01X\n", sch->st);
}
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static struct attribute *subch_attrs[] = {
&dev_attr_type.attr,
&dev_attr_modalias.attr,
NULL,
};
static struct attribute_group subch_attr_group = {
.attrs = subch_attrs,
};
static const struct attribute_group *default_subch_attr_groups[] = {
&subch_attr_group,
NULL,
};
static int css_register_subchannel(struct subchannel *sch)
{
int ret;
/* Initialize the subchannel structure */
sch->dev.parent = &channel_subsystems[0]->device;
sch->dev.bus = &css_bus_type;
sch->dev.release = &css_subchannel_release;
sch->dev.groups = default_subch_attr_groups;
/*
* We don't want to generate uevents for I/O subchannels that don't
* have a working ccw device behind them since they will be
* unregistered before they can be used anyway, so we delay the add
* uevent until after device recognition was successful.
* Note that we suppress the uevent for all subchannel types;
* the subchannel driver can decide itself when it wants to inform
* userspace of its existence.
*/
dev_set_uevent_suppress(&sch->dev, 1);
css_update_ssd_info(sch);
/* make it known to the system */
ret = css_sch_device_register(sch);
if (ret) {
CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
sch->schid.ssid, sch->schid.sch_no, ret);
return ret;
}
if (!sch->driver) {
/*
* No driver matched. Generate the uevent now so that
* a fitting driver module may be loaded based on the
* modalias.
*/
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
}
return ret;
}
int css_probe_device(struct subchannel_id schid)
{
int ret;
struct subchannel *sch;
if (cio_is_console(schid))
sch = cio_get_console_subchannel();
else {
sch = css_alloc_subchannel(schid);
if (IS_ERR(sch))
return PTR_ERR(sch);
}
ret = css_register_subchannel(sch);
if (ret) {
if (!cio_is_console(schid))
put_device(&sch->dev);
}
return ret;
}
static int
check_subchannel(struct device * dev, void * data)
{
struct subchannel *sch;
struct subchannel_id *schid = data;
sch = to_subchannel(dev);
return schid_equal(&sch->schid, schid);
}
struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
struct device *dev;
dev = bus_find_device(&css_bus_type, NULL,
&schid, check_subchannel);
return dev ? to_subchannel(dev) : NULL;
}
/**
* css_sch_is_valid() - check if a subchannel is valid
* @schib: subchannel information block for the subchannel
*/
int css_sch_is_valid(struct schib *schib)
{
if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
return 0;
if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
return 0;
return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
struct schib schib;
if (!slow) {
/* Will be done on the slow path. */
return -EAGAIN;
}
if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
/* Unusable - ignore. */
return 0;
}
CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
schid.sch_no);
return css_probe_device(schid);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
int ret = 0;
if (sch->driver) {
if (sch->driver->sch_event)
ret = sch->driver->sch_event(sch, slow);
else
dev_dbg(&sch->dev,
"Got subchannel machine check but "
"no sch_event handler provided.\n");
}
if (ret != 0 && ret != -EAGAIN) {
CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
sch->schid.ssid, sch->schid.sch_no, ret);
}
return ret;
}
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
struct subchannel *sch;
int ret;
sch = get_subchannel_by_schid(schid);
if (sch) {
ret = css_evaluate_known_subchannel(sch, slow);
put_device(&sch->dev);
} else
ret = css_evaluate_new_subchannel(schid, slow);
if (ret == -EAGAIN)
css_schedule_eval(schid);
}
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;
static int __init slow_subchannel_init(void)
{
spin_lock_init(&slow_subchannel_lock);
atomic_set(&css_eval_scheduled, 0);
init_waitqueue_head(&css_eval_wq);
slow_subchannel_set = idset_sch_new();
if (!slow_subchannel_set) {
CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
return -ENOMEM;
}
return 0;
}
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
int eval;
int rc;
spin_lock_irq(&slow_subchannel_lock);
eval = idset_sch_contains(slow_subchannel_set, sch->schid);
idset_sch_del(slow_subchannel_set, sch->schid);
spin_unlock_irq(&slow_subchannel_lock);
if (eval) {
rc = css_evaluate_known_subchannel(sch, 1);
if (rc == -EAGAIN)
css_schedule_eval(sch->schid);
}
return 0;
}
static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
int eval;
int rc = 0;
spin_lock_irq(&slow_subchannel_lock);
eval = idset_sch_contains(slow_subchannel_set, schid);
idset_sch_del(slow_subchannel_set, schid);
spin_unlock_irq(&slow_subchannel_lock);
if (eval) {
rc = css_evaluate_new_subchannel(schid, 1);
switch (rc) {
case -EAGAIN:
css_schedule_eval(schid);
rc = 0;
break;
case -ENXIO:
case -ENOMEM:
case -EIO:
/* These should abort looping */
break;
default:
rc = 0;
}
}
return rc;
}
static void css_slow_path_func(struct work_struct *unused)
{
unsigned long flags;
CIO_TRACE_EVENT(4, "slowpath");
for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
NULL);
spin_lock_irqsave(&slow_subchannel_lock, flags);
if (idset_is_empty(slow_subchannel_set)) {
atomic_set(&css_eval_scheduled, 0);
wake_up(&css_eval_wq);
}
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid)
{
unsigned long flags;
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_sch_add(slow_subchannel_set, schid);
atomic_set(&css_eval_scheduled, 1);
queue_work(cio_work_q, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
void css_schedule_eval_all(void)
{
unsigned long flags;
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_fill(slow_subchannel_set);
atomic_set(&css_eval_scheduled, 1);
queue_work(cio_work_q, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static int __unset_registered(struct device *dev, void *data)
{
struct idset *set = data;
struct subchannel *sch = to_subchannel(dev);
idset_sch_del(set, sch->schid);
return 0;
}
static void css_schedule_eval_all_unreg(void)
{
unsigned long flags;
struct idset *unreg_set;
/* Find unregistered subchannels. */
unreg_set = idset_sch_new();
if (!unreg_set) {
/* Fallback. */
css_schedule_eval_all();
return;
}
idset_fill(unreg_set);
bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
/* Apply to slow_subchannel_set. */
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_add_set(slow_subchannel_set, unreg_set);
atomic_set(&css_eval_scheduled, 1);
queue_work(cio_work_q, &slow_path_work);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
idset_free(unreg_set);
}
void css_wait_for_slow_path(void)
{
flush_workqueue(cio_work_q);
}
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
* Called from the machine check handler for subchannel report words.
*/
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
struct subchannel_id mchk_schid;
struct subchannel *sch;
if (overflow) {
css_schedule_eval_all();
return;
}
CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
if (crw1)
CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
crw1->anc, crw1->erc, crw1->rsid);
init_subchannel_id(&mchk_schid);
mchk_schid.sch_no = crw0->rsid;
if (crw1)
mchk_schid.ssid = (crw1->rsid >> 4) & 3;
if (crw0->erc == CRW_ERC_PMOD) {
sch = get_subchannel_by_schid(mchk_schid);
if (sch) {
css_update_ssd_info(sch);
put_device(&sch->dev);
}
}
/*
* Since we are always presented with IPI in the CRW, we have to
* use stsch() to find out if the subchannel in question has come
* or gone.
*/
css_evaluate_subchannel(mchk_schid, 0);
}
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
struct cpuid cpu_id;
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
} else {
#ifdef CONFIG_SMP
css->global_pgid.pgid_high.cpu_addr = stap();
#else
css->global_pgid.pgid_high.cpu_addr = 0;
#endif
}
get_cpu_id(&cpu_id);
css->global_pgid.cpu_id = cpu_id.ident;
css->global_pgid.cpu_model = cpu_id.machine;
css->global_pgid.tod_high = tod_high;
}
static void
channel_subsystem_release(struct device *dev)
{
struct channel_subsystem *css;
css = to_css(dev);
mutex_destroy(&css->mutex);
if (css->pseudo_subchannel) {
/* Implies that it has been generated but never registered. */
css_subchannel_release(&css->pseudo_subchannel->dev);
css->pseudo_subchannel = NULL;
}
kfree(css);
}
static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_subsystem *css = to_css(dev);
int ret;
if (!css)
return 0;
mutex_lock(&css->mutex);
ret = sprintf(buf, "%x\n", css->cm_enabled);
mutex_unlock(&css->mutex);
return ret;
}
static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct channel_subsystem *css = to_css(dev);
int ret;
unsigned long val;
ret = strict_strtoul(buf, 16, &val);
if (ret)
return ret;
mutex_lock(&css->mutex);
switch (val) {
case 0:
ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
break;
case 1:
ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
break;
default:
ret = -EINVAL;
}
mutex_unlock(&css->mutex);
return ret < 0 ? ret : count;
}
static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
static int __init setup_css(int nr)
{
u32 tod_high;
int ret;
struct channel_subsystem *css;
css = channel_subsystems[nr];
memset(css, 0, sizeof(struct channel_subsystem));
css->pseudo_subchannel =
kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
if (!css->pseudo_subchannel)
return -ENOMEM;
css->pseudo_subchannel->dev.parent = &css->device;
css->pseudo_subchannel->dev.release = css_subchannel_release;
dev_set_name(&css->pseudo_subchannel->dev, "defunct");
mutex_init(&css->pseudo_subchannel->reg_mutex);
ret = cio_create_sch_lock(css->pseudo_subchannel);
if (ret) {
kfree(css->pseudo_subchannel);
return ret;
}
mutex_init(&css->mutex);
css->valid = 1;
css->cssid = nr;
dev_set_name(&css->device, "css%x", nr);
css->device.release = channel_subsystem_release;
tod_high = (u32) (get_clock() >> 32);
css_generate_pgid(css, tod_high);
return 0;
}
static int css_reboot_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
int ret, i;
ret = NOTIFY_DONE;
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
css = channel_subsystems[i];
mutex_lock(&css->mutex);
if (css->cm_enabled)
if (chsc_secm(css, 0))
ret = NOTIFY_BAD;
mutex_unlock(&css->mutex);
}
return ret;
}
static struct notifier_block css_reboot_notifier = {
.notifier_call = css_reboot_event,
};
/*
* The css devices are neither on a bus, nor do they have a class or a
* special device type, so we cannot stop/restart channel path
* measurements via the normal suspend/resume callbacks; we have to use
* notifiers instead.
*/
static int css_power_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
int ret, i;
switch (event) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
ret = NOTIFY_DONE;
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
css = channel_subsystems[i];
mutex_lock(&css->mutex);
if (!css->cm_enabled) {
mutex_unlock(&css->mutex);
continue;
}
if (__chsc_do_secm(css, 0))
ret = NOTIFY_BAD;
mutex_unlock(&css->mutex);
}
break;
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
ret = NOTIFY_DONE;
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
css = channel_subsystems[i];
mutex_lock(&css->mutex);
if (!css->cm_enabled) {
mutex_unlock(&css->mutex);
continue;
}
if (__chsc_do_secm(css, 1))
ret = NOTIFY_BAD;
mutex_unlock(&css->mutex);
}
/* search for subchannels that appeared during hibernation */
css_schedule_reprobe();
break;
default:
ret = NOTIFY_DONE;
}
return ret;
}
static struct notifier_block css_power_notifier = {
.notifier_call = css_power_event,
};
/*
* Now that the driver core is running, we can set up our channel subsystem.
* The struct subchannel instances are created during probing (except for
* the static console subchannel).
*/
static int __init css_bus_init(void)
{
int ret, i;
ret = chsc_init();
if (ret)
return ret;
chsc_determine_css_characteristics();
/* Try to enable MSS. */
ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
if (ret)
max_ssid = 0;
else /* Success. */
max_ssid = __MAX_SSID;
ret = slow_subchannel_init();
if (ret)
goto out;
ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
if (ret)
goto out;
if ((ret = bus_register(&css_bus_type)))
goto out;
/* Setup css structure. */
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
if (!css) {
ret = -ENOMEM;
goto out_unregister;
}
channel_subsystems[i] = css;
ret = setup_css(i);
if (ret) {
kfree(channel_subsystems[i]);
goto out_unregister;
}
ret = device_register(&css->device);
if (ret) {
put_device(&css->device);
goto out_unregister;
}
if (css_chsc_characteristics.secm) {
ret = device_create_file(&css->device,
&dev_attr_cm_enable);
if (ret)
goto out_device;
}
ret = device_register(&css->pseudo_subchannel->dev);
if (ret) {
put_device(&css->pseudo_subchannel->dev);
goto out_file;
}
}
ret = register_reboot_notifier(&css_reboot_notifier);
if (ret)
goto out_unregister;
ret = register_pm_notifier(&css_power_notifier);
if (ret) {
unregister_reboot_notifier(&css_reboot_notifier);
goto out_unregister;
}
css_init_done = 1;
/* Enable default isc for I/O subchannels. */
isc_register(IO_SCH_ISC);
return 0;
out_file:
if (css_chsc_characteristics.secm)
device_remove_file(&channel_subsystems[i]->device,
&dev_attr_cm_enable);
out_device:
device_unregister(&channel_subsystems[i]->device);
out_unregister:
while (i > 0) {
struct channel_subsystem *css;
i--;
css = channel_subsystems[i];
device_unregister(&css->pseudo_subchannel->dev);
css->pseudo_subchannel = NULL;
if (css_chsc_characteristics.secm)
device_remove_file(&css->device,
&dev_attr_cm_enable);
device_unregister(&css->device);
}
bus_unregister(&css_bus_type);
out:
crw_unregister_handler(CRW_RSC_SCH);
idset_free(slow_subchannel_set);
chsc_init_cleanup();
pr_alert("The CSS device driver initialization failed with "
"errno=%d\n", ret);
return ret;
}
static void __init css_bus_cleanup(void)
{
struct channel_subsystem *css;
int i;
for (i = 0; i <= __MAX_CSSID; i++) {
css = channel_subsystems[i];
device_unregister(&css->pseudo_subchannel->dev);
css->pseudo_subchannel = NULL;
if (css_chsc_characteristics.secm)
device_remove_file(&css->device, &dev_attr_cm_enable);
device_unregister(&css->device);
}
bus_unregister(&css_bus_type);
crw_unregister_handler(CRW_RSC_SCH);
idset_free(slow_subchannel_set);
chsc_init_cleanup();
isc_unregister(IO_SCH_ISC);
}
static int __init channel_subsystem_init(void)
{
int ret;
ret = css_bus_init();
if (ret)
return ret;
cio_work_q = create_singlethread_workqueue("cio");
if (!cio_work_q) {
ret = -ENOMEM;
goto out_bus;
}
ret = io_subchannel_init();
if (ret)
goto out_wq;
return ret;
out_wq:
destroy_workqueue(cio_work_q);
out_bus:
css_bus_cleanup();
return ret;
}
subsys_initcall(channel_subsystem_init);
static int css_settle(struct device_driver *drv, void *unused)
{
struct css_driver *cssdrv = to_cssdriver(drv);
if (cssdrv->settle)
return cssdrv->settle();
return 0;
}
int css_complete_work(void)
{
int ret;
/* Wait for the evaluation of subchannels to finish. */
ret = wait_event_interruptible(css_eval_wq,
atomic_read(&css_eval_scheduled) == 0);
if (ret)
return -EINTR;
flush_workqueue(cio_work_q);
/* Wait for the subchannel type specific initialization to finish */
return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
/*
* Wait for the initialization of devices to finish, to make sure we are
* done with our setup if the search for the root device starts.
*/
static int __init channel_subsystem_init_sync(void)
{
/* Start initial subchannel evaluation. */
css_schedule_eval_all();
css_complete_work();
return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);
void channel_subsystem_reinit(void)
{
struct channel_path *chp;
struct chp_id chpid;
chsc_enable_facility(CHSC_SDA_OC_MSS);
chp_id_for_each(&chpid) {
chp = chpid_to_chp(chpid);
if (!chp)
continue;
chsc_determine_base_channel_path_desc(chpid, &chp->desc);
}
}
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
int ret;
/* Handle pending CRWs. */
crw_wait_for_channel_report();
ret = css_complete_work();
return ret ? ret : count;
}
static const struct file_operations cio_settle_proc_fops = {
.open = nonseekable_open,
.write = cio_settle_write,
.llseek = no_llseek,
};
static int __init cio_settle_init(void)
{
struct proc_dir_entry *entry;
entry = proc_create("cio_settle", S_IWUSR, NULL,
&cio_settle_proc_fops);
if (!entry)
return -ENOMEM;
return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
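/*
 * Illustrative sketch, not part of the original driver: the cio_settle
 * node created above can be used from userspace (e.g. by udev) to wait
 * for pending subchannel evaluation; any write ends up in
 * cio_settle_write(). The open() flags and the minimal error handling
 * are assumptions for the example.
 */
#if 0	/* userspace example only */
#include <fcntl.h>
#include <unistd.h>

static void wait_for_cio_settle(void)
{
	int fd = open("/proc/cio_settle", O_WRONLY);

	if (fd >= 0) {
		/* blocks until css_complete_work() returns */
		write(fd, "1", 1);
		close(fd);
	}
}
#endif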
int sch_is_pseudo_sch(struct subchannel *sch)
{
return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *driver = to_cssdriver(drv);
struct css_device_id *id;
for (id = driver->subchannel_type; id->match_flags; id++) {
if (sch->st == id->type)
return 1;
}
return 0;
}
static int css_probe(struct device *dev)
{
struct subchannel *sch;
int ret;
sch = to_subchannel(dev);
sch->driver = to_cssdriver(dev->driver);
ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
if (ret)
sch->driver = NULL;
return ret;
}
static int css_remove(struct device *dev)
{
struct subchannel *sch;
int ret;
sch = to_subchannel(dev);
ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
sch->driver = NULL;
return ret;
}
static void css_shutdown(struct device *dev)
{
struct subchannel *sch;
sch = to_subchannel(dev);
if (sch->driver && sch->driver->shutdown)
sch->driver->shutdown(sch);
}
static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct subchannel *sch = to_subchannel(dev);
int ret;
ret = add_uevent_var(env, "ST=%01X", sch->st);
if (ret)
return ret;
ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
return ret;
}
static int css_pm_prepare(struct device *dev)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *drv;
if (mutex_is_locked(&sch->reg_mutex))
return -EAGAIN;
if (!sch->dev.driver)
return 0;
drv = to_cssdriver(sch->dev.driver);
/* Notify drivers that they may not register children. */
return drv->prepare ? drv->prepare(sch) : 0;
}
static void css_pm_complete(struct device *dev)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *drv;
if (!sch->dev.driver)
return;
drv = to_cssdriver(sch->dev.driver);
if (drv->complete)
drv->complete(sch);
}
static int css_pm_freeze(struct device *dev)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *drv;
if (!sch->dev.driver)
return 0;
drv = to_cssdriver(sch->dev.driver);
return drv->freeze ? drv->freeze(sch) : 0;
}
static int css_pm_thaw(struct device *dev)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *drv;
if (!sch->dev.driver)
return 0;
drv = to_cssdriver(sch->dev.driver);
return drv->thaw ? drv->thaw(sch) : 0;
}
static int css_pm_restore(struct device *dev)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *drv;
css_update_ssd_info(sch);
if (!sch->dev.driver)
return 0;
drv = to_cssdriver(sch->dev.driver);
return drv->restore ? drv->restore(sch) : 0;
}
static const struct dev_pm_ops css_pm_ops = {
.prepare = css_pm_prepare,
.complete = css_pm_complete,
.freeze = css_pm_freeze,
.thaw = css_pm_thaw,
.restore = css_pm_restore,
};
static struct bus_type css_bus_type = {
.name = "css",
.match = css_bus_match,
.probe = css_probe,
.remove = css_remove,
.shutdown = css_shutdown,
.uevent = css_uevent,
.pm = &css_pm_ops,
};
/**
* css_driver_register - register a css driver
* @cdrv: css driver to register
*
* This is mainly a wrapper around driver_register() that sets the
* bus_type in the embedded struct device_driver correctly.
*/
int css_driver_register(struct css_driver *cdrv)
{
cdrv->drv.bus = &css_bus_type;
return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
/**
* css_driver_unregister - unregister a css driver
* @cdrv: css driver to unregister
*
* This is a wrapper around driver_unregister.
*/
void css_driver_unregister(struct css_driver *cdrv)
{
driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
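/*
 * Illustrative sketch, not part of the original file: a minimal
 * subchannel driver registering with the css bus via
 * css_driver_register(). The id table layout follows css_bus_match()
 * above; the match_flags value, SUBCHANNEL_TYPE_IO and the example
 * names are assumptions.
 */
#if 0	/* example only */
static struct css_device_id example_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static int example_sch_probe(struct subchannel *sch)
{
	/* claim the subchannel; return 0 on success */
	return 0;
}

static struct css_driver example_css_driver = {
	.drv = {
		.name	= "example_sch",
		.owner	= THIS_MODULE,
	},
	.subchannel_type = example_subchannel_ids,
	.probe		 = example_sch_probe,
};

static int __init example_driver_init(void)
{
	return css_driver_register(&example_css_driver);
}
#endif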
MODULE_LICENSE("GPL");
| gpl-2.0 |
Pafcholini/android_kernel_c8690 | arch/mips/alchemy/common/gpiolib-au1000.c | 3795 | 4060 | /*
* Copyright (C) 2007-2009, OpenWrt.org, Florian Fainelli <florian@openwrt.org>
* GPIOLIB support for Au1000, Au1500, Au1100, Au1550 and Au12x0.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Notes:
* The Au1000 SoC has only one GPIO block: GPIO1.
* The Au1100, Au15x0 and Au12x0 have a second one: GPIO2.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio.h>
static int gpio2_get(struct gpio_chip *chip, unsigned offset)
{
return alchemy_gpio2_get_value(offset + ALCHEMY_GPIO2_BASE);
}
static void gpio2_set(struct gpio_chip *chip, unsigned offset, int value)
{
alchemy_gpio2_set_value(offset + ALCHEMY_GPIO2_BASE, value);
}
static int gpio2_direction_input(struct gpio_chip *chip, unsigned offset)
{
return alchemy_gpio2_direction_input(offset + ALCHEMY_GPIO2_BASE);
}
static int gpio2_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
return alchemy_gpio2_direction_output(offset + ALCHEMY_GPIO2_BASE,
value);
}
static int gpio2_to_irq(struct gpio_chip *chip, unsigned offset)
{
return alchemy_gpio2_to_irq(offset + ALCHEMY_GPIO2_BASE);
}
static int gpio1_get(struct gpio_chip *chip, unsigned offset)
{
return alchemy_gpio1_get_value(offset + ALCHEMY_GPIO1_BASE);
}
static void gpio1_set(struct gpio_chip *chip,
unsigned offset, int value)
{
alchemy_gpio1_set_value(offset + ALCHEMY_GPIO1_BASE, value);
}
static int gpio1_direction_input(struct gpio_chip *chip, unsigned offset)
{
return alchemy_gpio1_direction_input(offset + ALCHEMY_GPIO1_BASE);
}
static int gpio1_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
return alchemy_gpio1_direction_output(offset + ALCHEMY_GPIO1_BASE,
value);
}
static int gpio1_to_irq(struct gpio_chip *chip, unsigned offset)
{
return alchemy_gpio1_to_irq(offset + ALCHEMY_GPIO1_BASE);
}
struct gpio_chip alchemy_gpio_chip[] = {
[0] = {
.label = "alchemy-gpio1",
.direction_input = gpio1_direction_input,
.direction_output = gpio1_direction_output,
.get = gpio1_get,
.set = gpio1_set,
.to_irq = gpio1_to_irq,
.base = ALCHEMY_GPIO1_BASE,
.ngpio = ALCHEMY_GPIO1_NUM,
},
[1] = {
.label = "alchemy-gpio2",
.direction_input = gpio2_direction_input,
.direction_output = gpio2_direction_output,
.get = gpio2_get,
.set = gpio2_set,
.to_irq = gpio2_to_irq,
.base = ALCHEMY_GPIO2_BASE,
.ngpio = ALCHEMY_GPIO2_NUM,
},
};
static int __init alchemy_gpiolib_init(void)
{
gpiochip_add(&alchemy_gpio_chip[0]);
if (alchemy_get_cputype() != ALCHEMY_CPU_AU1000)
gpiochip_add(&alchemy_gpio_chip[1]);
return 0;
}
arch_initcall(alchemy_gpiolib_init);
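/*
 * Illustrative sketch, not part of the original file: once the two chips
 * are registered, consumers use the generic gpiolib API with the Alchemy
 * GPIO numbering. The pin offset and the label below are arbitrary
 * example values.
 */
#if 0	/* example only */
static int __init example_led_init(void)
{
	int gpio = ALCHEMY_GPIO1_BASE + 5;	/* GPIO1 block, pin 5 (example) */
	int ret;

	ret = gpio_request(gpio, "example-led");
	if (ret)
		return ret;

	/* routed to gpio1_direction_output()/gpio1_set() above */
	return gpio_direction_output(gpio, 1);
}
#endif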
| gpl-2.0 |
SerenityS/android_kernel_pantech_msm8974 | net/ipv4/udp_diag.c | 4819 | 5316 | /*
* udp_diag.c Module for monitoring UDP transport protocol sockets.
*
* Authors: Pavel Emelyanov, <xemul@parallels.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/udp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <linux/sock_diag.h>
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
struct netlink_callback *cb, struct inet_diag_req_v2 *req,
struct nlattr *bc)
{
if (!inet_diag_bc_sk(bc, sk))
return 0;
return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
{
int err = -EINVAL;
struct sock *sk;
struct sk_buff *rep;
if (req->sdiag_family == AF_INET)
sk = __udp4_lib_lookup(&init_net,
req->id.idiag_src[0], req->id.idiag_sport,
req->id.idiag_dst[0], req->id.idiag_dport,
req->id.idiag_if, tbl);
#if IS_ENABLED(CONFIG_IPV6)
else if (req->sdiag_family == AF_INET6)
sk = __udp6_lib_lookup(&init_net,
(struct in6_addr *)req->id.idiag_src,
req->id.idiag_sport,
(struct in6_addr *)req->id.idiag_dst,
req->id.idiag_dport,
req->id.idiag_if, tbl);
#endif
else
goto out_nosk;
err = -ENOENT;
if (sk == NULL)
goto out_nosk;
err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
if (err)
goto out;
err = -ENOMEM;
rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
sizeof(struct inet_diag_meminfo) +
64)), GFP_KERNEL);
if (!rep)
goto out;
err = inet_sk_diag_fill(sk, NULL, rep, req,
NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, nlh);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
kfree_skb(rep);
goto out;
}
err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
MSG_DONTWAIT);
if (err > 0)
err = 0;
out:
if (sk)
sock_put(sk);
out_nosk:
return err;
}
static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb,
struct inet_diag_req_v2 *r, struct nlattr *bc)
{
int num, s_num, slot, s_slot;
s_slot = cb->args[0];
num = s_num = cb->args[1];
for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
struct sock *sk;
struct hlist_nulls_node *node;
struct udp_hslot *hslot = &table->hash[slot];
if (hlist_nulls_empty(&hslot->head))
continue;
spin_lock_bh(&hslot->lock);
sk_nulls_for_each(sk, node, &hslot->head) {
struct inet_sock *inet = inet_sk(sk);
if (num < s_num)
goto next;
if (!(r->idiag_states & (1 << sk->sk_state)))
goto next;
if (r->sdiag_family != AF_UNSPEC &&
sk->sk_family != r->sdiag_family)
goto next;
if (r->id.idiag_sport != inet->inet_sport &&
r->id.idiag_sport)
goto next;
if (r->id.idiag_dport != inet->inet_dport &&
r->id.idiag_dport)
goto next;
if (sk_diag_dump(sk, skb, cb, r, bc) < 0) {
spin_unlock_bh(&hslot->lock);
goto done;
}
next:
num++;
}
spin_unlock_bh(&hslot->lock);
}
done:
cb->args[0] = slot;
cb->args[1] = num;
}
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct inet_diag_req_v2 *r, struct nlattr *bc)
{
udp_dump(&udp_table, skb, cb, r, bc);
}
static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
struct inet_diag_req_v2 *req)
{
return udp_dump_one(&udp_table, in_skb, nlh, req);
}
static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
void *info)
{
r->idiag_rqueue = sk_rmem_alloc_get(sk);
r->idiag_wqueue = sk_wmem_alloc_get(sk);
}
static const struct inet_diag_handler udp_diag_handler = {
.dump = udp_diag_dump,
.dump_one = udp_diag_dump_one,
.idiag_get_info = udp_diag_get_info,
.idiag_type = IPPROTO_UDP,
};
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct inet_diag_req_v2 *r, struct nlattr *bc)
{
udp_dump(&udplite_table, skb, cb, r, bc);
}
static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
struct inet_diag_req_v2 *req)
{
return udp_dump_one(&udplite_table, in_skb, nlh, req);
}
static const struct inet_diag_handler udplite_diag_handler = {
.dump = udplite_diag_dump,
.dump_one = udplite_diag_dump_one,
.idiag_get_info = udp_diag_get_info,
.idiag_type = IPPROTO_UDPLITE,
};
static int __init udp_diag_init(void)
{
int err;
err = inet_diag_register(&udp_diag_handler);
if (err)
goto out;
err = inet_diag_register(&udplite_diag_handler);
if (err)
goto out_lite;
out:
return err;
out_lite:
inet_diag_unregister(&udp_diag_handler);
goto out;
}
static void __exit udp_diag_exit(void)
{
inet_diag_unregister(&udplite_diag_handler);
inet_diag_unregister(&udp_diag_handler);
}
module_init(udp_diag_init);
module_exit(udp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);
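/*
 * Illustrative userspace sketch, not part of this module: composing a
 * request that the handlers above will service. A dump request of this
 * shape reaches udp_diag_dump() through the registered inet_diag
 * handler. Header paths and error handling are simplified assumptions.
 */
#if 0	/* userspace example only */
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <string.h>
#include <unistd.h>

static int request_udp_socket_dump(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	struct {
		struct nlmsghdr nlh;
		struct inet_diag_req_v2 req;
	} msg;

	if (fd < 0)
		return -1;

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	msg.req.sdiag_family = AF_INET;
	msg.req.sdiag_protocol = IPPROTO_UDP;
	msg.req.idiag_states = -1;	/* all socket states */

	/* replies are built by inet_sk_diag_fill() via sk_diag_dump() */
	return write(fd, &msg, sizeof(msg));
}
#endif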
| gpl-2.0 |
caplio/android_kernel_samsung_hltedcm | drivers/message/fusion/mptsas.c | 5075 | 150915 | /*
* linux/drivers/message/fusion/mptsas.c
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
NO WARRANTY
THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
solely responsible for determining the appropriateness of using and
distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement, including but not limited to
the risks and costs of program errors, damage to or loss of data,
programs or equipment, and unavailability or interruption of operations.
DISCLAIMER OF LIABILITY
NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/delay.h> /* for mdelay */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dbg.h>
#include "mptbase.h"
#include "mptscsih.h"
#include "mptsas.h"
#define my_NAME "Fusion MPT SAS Host driver"
#define my_VERSION MPT_LINUX_VERSION_COMMON
#define MYNAM "mptsas"
/*
* Reserved channel for integrated raid
*/
#define MPTSAS_RAID_CHANNEL 1
#define SAS_CONFIG_PAGE_TIMEOUT 30
MODULE_AUTHOR(MODULEAUTHOR);
MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);
static int mpt_pt_clear;
module_param(mpt_pt_clear, int, 0);
MODULE_PARM_DESC(mpt_pt_clear,
" Clear persistency table: enable=1 "
"(default=MPTSCSIH_PT_CLEAR=0)");
/* scsi mid-layer global parameter is max_report_luns, which is 511 */
#define MPTSAS_MAX_LUN (16895)
static int max_lun = MPTSAS_MAX_LUN;
module_param(max_lun, int, 0);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
static int mpt_loadtime_max_sectors = 8192;
module_param(mpt_loadtime_max_sectors, int, 0);
MODULE_PARM_DESC(mpt_loadtime_max_sectors,
" Maximum sectors for the Host Bus Adapter. Range 64 to 8192, default=8192");
static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS;
static u8 mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS;
static void mptsas_firmware_event_work(struct work_struct *work);
static void mptsas_send_sas_event(struct fw_event_work *fw_event);
static void mptsas_send_raid_event(struct fw_event_work *fw_event);
static void mptsas_send_ir2_event(struct fw_event_work *fw_event);
static void mptsas_parse_device_info(struct sas_identify *identify,
struct mptsas_devinfo *device_info);
static inline void mptsas_set_rphy(MPT_ADAPTER *ioc,
struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy);
static struct mptsas_phyinfo *mptsas_find_phyinfo_by_sas_address
(MPT_ADAPTER *ioc, u64 sas_address);
static int mptsas_sas_device_pg0(MPT_ADAPTER *ioc,
struct mptsas_devinfo *device_info, u32 form, u32 form_specific);
static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc,
struct mptsas_enclosure *enclosure, u32 form, u32 form_specific);
static int mptsas_add_end_device(MPT_ADAPTER *ioc,
struct mptsas_phyinfo *phy_info);
static void mptsas_del_end_device(MPT_ADAPTER *ioc,
struct mptsas_phyinfo *phy_info);
static void mptsas_send_link_status_event(struct fw_event_work *fw_event);
static struct mptsas_portinfo *mptsas_find_portinfo_by_sas_address
(MPT_ADAPTER *ioc, u64 sas_address);
static void mptsas_expander_delete(MPT_ADAPTER *ioc,
struct mptsas_portinfo *port_info, u8 force);
static void mptsas_send_expander_event(struct fw_event_work *fw_event);
static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
void mptsas_schedule_target_reset(void *ioc);
static void mptsas_print_phy_data(MPT_ADAPTER *ioc,
MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
{
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"---- IO UNIT PAGE 0 ------------\n", ioc->name));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handle=0x%X\n",
ioc->name, le16_to_cpu(phy_data->AttachedDeviceHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Controller Handle=0x%X\n",
ioc->name, le16_to_cpu(phy_data->ControllerDevHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Port=0x%X\n",
ioc->name, phy_data->Port));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Port Flags=0x%X\n",
ioc->name, phy_data->PortFlags));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PHY Flags=0x%X\n",
ioc->name, phy_data->PhyFlags));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Negotiated Link Rate=0x%X\n",
ioc->name, phy_data->NegotiatedLinkRate));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Controller PHY Device Info=0x%X\n", ioc->name,
le32_to_cpu(phy_data->ControllerPhyDeviceInfo)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DiscoveryStatus=0x%X\n\n",
ioc->name, le32_to_cpu(phy_data->DiscoveryStatus)));
}
static void mptsas_print_phy_pg0(MPT_ADAPTER *ioc, SasPhyPage0_t *pg0)
{
__le64 sas_address;
memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"---- SAS PHY PAGE 0 ------------\n", ioc->name));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Attached Device Handle=0x%X\n", ioc->name,
le16_to_cpu(pg0->AttachedDevHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SAS Address=0x%llX\n",
ioc->name, (unsigned long long)le64_to_cpu(sas_address)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Attached PHY Identifier=0x%X\n", ioc->name,
pg0->AttachedPhyIdentifier));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Attached Device Info=0x%X\n",
ioc->name, le32_to_cpu(pg0->AttachedDeviceInfo)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Programmed Link Rate=0x%X\n",
ioc->name, pg0->ProgrammedLinkRate));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Change Count=0x%X\n",
ioc->name, pg0->ChangeCount));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PHY Info=0x%X\n\n",
ioc->name, le32_to_cpu(pg0->PhyInfo)));
}
static void mptsas_print_phy_pg1(MPT_ADAPTER *ioc, SasPhyPage1_t *pg1)
{
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"---- SAS PHY PAGE 1 ------------\n", ioc->name));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Invalid Dword Count=0x%x\n",
ioc->name, pg1->InvalidDwordCount));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Running Disparity Error Count=0x%x\n", ioc->name,
pg1->RunningDisparityErrorCount));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Loss Dword Synch Count=0x%x\n", ioc->name,
pg1->LossDwordSynchCount));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"PHY Reset Problem Count=0x%x\n\n", ioc->name,
pg1->PhyResetProblemCount));
}
static void mptsas_print_device_pg0(MPT_ADAPTER *ioc, SasDevicePage0_t *pg0)
{
__le64 sas_address;
memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"---- SAS DEVICE PAGE 0 ---------\n", ioc->name));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handle=0x%X\n",
ioc->name, le16_to_cpu(pg0->DevHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Parent Handle=0x%X\n",
ioc->name, le16_to_cpu(pg0->ParentDevHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Enclosure Handle=0x%X\n",
ioc->name, le16_to_cpu(pg0->EnclosureHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Slot=0x%X\n",
ioc->name, le16_to_cpu(pg0->Slot)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SAS Address=0x%llX\n",
ioc->name, (unsigned long long)le64_to_cpu(sas_address)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Target ID=0x%X\n",
ioc->name, pg0->TargetID));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Bus=0x%X\n",
ioc->name, pg0->Bus));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Parent Phy Num=0x%X\n",
ioc->name, pg0->PhyNum));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Access Status=0x%X\n",
ioc->name, le16_to_cpu(pg0->AccessStatus)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Device Info=0x%X\n",
ioc->name, le32_to_cpu(pg0->DeviceInfo)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Flags=0x%X\n",
ioc->name, le16_to_cpu(pg0->Flags)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Physical Port=0x%X\n\n",
ioc->name, pg0->PhysicalPort));
}
static void mptsas_print_expander_pg1(MPT_ADAPTER *ioc, SasExpanderPage1_t *pg1)
{
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"---- SAS EXPANDER PAGE 1 ------------\n", ioc->name));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Physical Port=0x%X\n",
ioc->name, pg1->PhysicalPort));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PHY Identifier=0x%X\n",
ioc->name, pg1->PhyIdentifier));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Negotiated Link Rate=0x%X\n",
ioc->name, pg1->NegotiatedLinkRate));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Programmed Link Rate=0x%X\n",
ioc->name, pg1->ProgrammedLinkRate));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Hardware Link Rate=0x%X\n",
ioc->name, pg1->HwLinkRate));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Owner Device Handle=0x%X\n",
ioc->name, le16_to_cpu(pg1->OwnerDevHandle)));
dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Attached Device Handle=0x%X\n\n", ioc->name,
le16_to_cpu(pg1->AttachedDevHandle)));
}
/* inhibit sas firmware event handling */
static void
mptsas_fw_event_off(MPT_ADAPTER *ioc)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
ioc->fw_events_off = 1;
ioc->sas_discovery_quiesce_io = 0;
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/* enable sas firmware event handling */
static void
mptsas_fw_event_on(MPT_ADAPTER *ioc)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
ioc->fw_events_off = 0;
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/* queue a sas firmware event */
static void
mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
unsigned long delay)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
list_add_tail(&fw_event->list, &ioc->fw_event_list);
INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)"
"on cpuid %d\n", ioc->name, __func__,
fw_event, smp_processor_id()));
queue_delayed_work_on(smp_processor_id(), ioc->fw_event_q,
&fw_event->work, delay);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/* requeue a sas firmware event */
static void
mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
unsigned long delay)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task "
"(fw_event=0x%p)on cpuid %d\n", ioc->name, __func__,
fw_event, smp_processor_id()));
fw_event->retries++;
queue_delayed_work_on(smp_processor_id(), ioc->fw_event_q,
&fw_event->work, msecs_to_jiffies(delay));
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/* free memory associated to a sas firmware event */
static void
mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
ioc->name, __func__, fw_event));
list_del(&fw_event->list);
kfree(fw_event);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/* walk the firmware event queue, and either stop or wait for
* outstanding events to complete */
static void
mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
{
struct fw_event_work *fw_event, *next;
struct mptsas_target_reset_event *target_reset_list, *n;
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
/* flush the target_reset_list */
if (!list_empty(&hd->target_reset_list)) {
list_for_each_entry_safe(target_reset_list, n,
&hd->target_reset_list, list) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: removing target reset for id=%d\n",
ioc->name, __func__,
target_reset_list->sas_event_data.TargetID));
list_del(&target_reset_list->list);
kfree(target_reset_list);
}
}
if (list_empty(&ioc->fw_event_list) ||
!ioc->fw_event_q || in_interrupt())
return;
list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
if (cancel_delayed_work(&fw_event->work))
mptsas_free_fw_event(ioc, fw_event);
}
}
static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
{
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
}
static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
{
struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
}
/*
* mptsas_find_portinfo_by_handle
*
* This function should be called with the sas_topology_mutex already held
*/
static struct mptsas_portinfo *
mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
{
struct mptsas_portinfo *port_info, *rc=NULL;
int i;
list_for_each_entry(port_info, &ioc->sas_topology, list)
for (i = 0; i < port_info->num_phys; i++)
if (port_info->phy_info[i].identify.handle == handle) {
rc = port_info;
goto out;
}
out:
return rc;
}
/**
* mptsas_find_portinfo_by_sas_address - find port info by SAS address
* @ioc: Pointer to MPT_ADAPTER structure
* @sas_address: SAS address of the port to look up
*
* This function should be called with the sas_topology_mutex already held
*
**/
static struct mptsas_portinfo *
mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
{
struct mptsas_portinfo *port_info, *rc = NULL;
int i;
if (sas_address >= ioc->hba_port_sas_addr &&
sas_address < (ioc->hba_port_sas_addr +
ioc->hba_port_num_phy))
return ioc->hba_port_info;
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(port_info, &ioc->sas_topology, list)
for (i = 0; i < port_info->num_phys; i++)
if (port_info->phy_info[i].identify.sas_address ==
sas_address) {
rc = port_info;
goto out;
}
out:
mutex_unlock(&ioc->sas_topology_mutex);
return rc;
}
/*
* Returns true if there is a scsi end device
*/
static inline int
mptsas_is_end_device(struct mptsas_devinfo * attached)
{
if ((attached->sas_address) &&
(attached->device_info &
MPI_SAS_DEVICE_INFO_END_DEVICE) &&
((attached->device_info &
MPI_SAS_DEVICE_INFO_SSP_TARGET) |
(attached->device_info &
MPI_SAS_DEVICE_INFO_STP_TARGET) |
(attached->device_info &
MPI_SAS_DEVICE_INFO_SATA_DEVICE)))
return 1;
else
return 0;
}
/* no mutex */
static void
mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
{
struct mptsas_portinfo *port_info;
struct mptsas_phyinfo *phy_info;
u8 i;
if (!port_details)
return;
port_info = port_details->port_info;
phy_info = port_info->phy_info;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: [%p]: num_phys=%02d "
"bitmask=0x%016llX\n", ioc->name, __func__, port_details,
port_details->num_phys, (unsigned long long)
port_details->phy_bitmask));
for (i = 0; i < port_info->num_phys; i++, phy_info++) {
if(phy_info->port_details != port_details)
continue;
memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
mptsas_set_rphy(ioc, phy_info, NULL);
phy_info->port_details = NULL;
}
kfree(port_details);
}
static inline struct sas_rphy *
mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
{
if (phy_info->port_details)
return phy_info->port_details->rphy;
else
return NULL;
}
static inline void
mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
{
if (phy_info->port_details) {
phy_info->port_details->rphy = rphy;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
ioc->name, rphy));
}
if (rphy) {
dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
&rphy->dev, MYIOC_s_FMT "add:", ioc->name));
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
ioc->name, rphy, rphy->dev.release));
}
}
static inline struct sas_port *
mptsas_get_port(struct mptsas_phyinfo *phy_info)
{
if (phy_info->port_details)
return phy_info->port_details->port;
else
return NULL;
}
static inline void
mptsas_set_port(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_port *port)
{
if (phy_info->port_details)
phy_info->port_details->port = port;
if (port) {
dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
&port->dev, MYIOC_s_FMT "add:", ioc->name));
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "port=%p release=%p\n",
ioc->name, port, port->dev.release));
}
}
static inline struct scsi_target *
mptsas_get_starget(struct mptsas_phyinfo *phy_info)
{
if (phy_info->port_details)
return phy_info->port_details->starget;
else
return NULL;
}
static inline void
mptsas_set_starget(struct mptsas_phyinfo *phy_info, struct scsi_target *
starget)
{
if (phy_info->port_details)
phy_info->port_details->starget = starget;
}
/**
* mptsas_add_device_component - add an attached device to the device info list
* @ioc: Pointer to MPT_ADAPTER structure
* @channel: fw mapped channel
* @id: fw mapped id
* @sas_address: SAS address of the attached device
* @device_info: device capability bits
* @slot: enclosure slot of the device
* @enclosure_logical_id: logical id of the enclosure
*
**/
static void
mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id)
{
struct mptsas_device_info *sas_info, *next;
struct scsi_device *sdev;
struct scsi_target *starget;
struct sas_rphy *rphy;
/*
* Delete all matching devices out of the list
*/
mutex_lock(&ioc->sas_device_info_mutex);
list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
list) {
if (!sas_info->is_logical_volume &&
(sas_info->sas_address == sas_address ||
(sas_info->fw.channel == channel &&
sas_info->fw.id == id))) {
list_del(&sas_info->list);
kfree(sas_info);
}
}
sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
if (!sas_info)
goto out;
/*
* Set Firmware mapping
*/
sas_info->fw.id = id;
sas_info->fw.channel = channel;
sas_info->sas_address = sas_address;
sas_info->device_info = device_info;
sas_info->slot = slot;
sas_info->enclosure_logical_id = enclosure_logical_id;
INIT_LIST_HEAD(&sas_info->list);
list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
/*
* Set OS mapping
*/
shost_for_each_device(sdev, ioc->sh) {
starget = scsi_target(sdev);
rphy = dev_to_rphy(starget->dev.parent);
if (rphy->identify.sas_address == sas_address) {
sas_info->os.id = starget->id;
sas_info->os.channel = starget->channel;
}
}
out:
mutex_unlock(&ioc->sas_device_info_mutex);
return;
}
/**
* mptsas_add_device_component_by_fw - add a device identified by fw channel/id
* @ioc: Pointer to MPT_ADAPTER structure
* @channel: fw mapped channel
* @id: fw mapped id
*
**/
static void
mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
struct mptsas_devinfo sas_device;
struct mptsas_enclosure enclosure_info;
int rc;
rc = mptsas_sas_device_pg0(ioc, &sas_device,
(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
(channel << 8) + id);
if (rc)
return;
memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
(MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
sas_device.handle_enclosure);
mptsas_add_device_component(ioc, sas_device.channel,
sas_device.id, sas_device.sas_address, sas_device.device_info,
sas_device.slot, enclosure_info.enclosure_logical_id);
}
/**
* mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list
* @ioc: Pointer to MPT_ADAPTER structure
* @starget: SCSI target of the RAID volume
*
**/
static void
mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
struct scsi_target *starget)
{
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
dma_addr_t dma_handle;
pRaidVolumePage0_t buffer = NULL;
int i;
RaidPhysDiskPage0_t phys_disk;
struct mptsas_device_info *sas_info, *next;
memset(&cfg, 0 , sizeof(CONFIGPARMS));
memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
/* assumption: all volumes are on channel 0 */
cfg.pageAddr = starget->id;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
if (!hdr.PageLength)
goto out;
buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
&dma_handle);
if (!buffer)
goto out;
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
if (!buffer->NumPhysDisks)
goto out;
/*
* Adding entry for hidden components
*/
for (i = 0; i < buffer->NumPhysDisks; i++) {
if (mpt_raid_phys_disk_pg0(ioc,
buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
continue;
mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus,
phys_disk.PhysDiskID);
mutex_lock(&ioc->sas_device_info_mutex);
list_for_each_entry(sas_info, &ioc->sas_device_info_list,
list) {
if (!sas_info->is_logical_volume &&
(sas_info->fw.channel == phys_disk.PhysDiskBus &&
sas_info->fw.id == phys_disk.PhysDiskID)) {
sas_info->is_hidden_raid_component = 1;
sas_info->volume_id = starget->id;
}
}
mutex_unlock(&ioc->sas_device_info_mutex);
}
/*
* Delete all matching devices out of the list
*/
mutex_lock(&ioc->sas_device_info_mutex);
list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
list) {
if (sas_info->is_logical_volume && sas_info->fw.id ==
starget->id) {
list_del(&sas_info->list);
kfree(sas_info);
}
}
sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
if (sas_info) {
sas_info->fw.id = starget->id;
sas_info->os.id = starget->id;
sas_info->os.channel = starget->channel;
sas_info->is_logical_volume = 1;
INIT_LIST_HEAD(&sas_info->list);
list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
}
mutex_unlock(&ioc->sas_device_info_mutex);
out:
if (buffer)
pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
dma_handle);
}
/**
* mptsas_add_device_component_starget - add the device attached to a scsi target
* @ioc: Pointer to MPT_ADAPTER structure
* @starget: SCSI target of the attached device
*
**/
static void
mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
struct scsi_target *starget)
{
VirtTarget *vtarget;
struct sas_rphy *rphy;
struct mptsas_phyinfo *phy_info = NULL;
struct mptsas_enclosure enclosure_info;
rphy = dev_to_rphy(starget->dev.parent);
vtarget = starget->hostdata;
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
rphy->identify.sas_address);
if (!phy_info)
return;
memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
(MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
phy_info->attached.handle_enclosure);
mptsas_add_device_component(ioc, phy_info->attached.channel,
phy_info->attached.id, phy_info->attached.sas_address,
phy_info->attached.device_info,
phy_info->attached.slot, enclosure_info.enclosure_logical_id);
}
/**
* mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached
* @ioc: Pointer to MPT_ADAPTER structure
* @channel: os mapped channel
* @id: os mapped id
*
**/
static void
mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
struct mptsas_device_info *sas_info, *next;
/*
* Set is_cached flag
*/
list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
list) {
if (sas_info->os.channel == channel && sas_info->os.id == id)
sas_info->is_cached = 1;
}
}
/**
* mptsas_del_device_components - Cleaning the list
* @ioc: Pointer to MPT_ADAPTER structure
*
**/
static void
mptsas_del_device_components(MPT_ADAPTER *ioc)
{
struct mptsas_device_info *sas_info, *next;
mutex_lock(&ioc->sas_device_info_mutex);
list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
list) {
list_del(&sas_info->list);
kfree(sas_info);
}
mutex_unlock(&ioc->sas_device_info_mutex);
}
/*
* mptsas_setup_wide_ports
*
* Updates for new and existing narrow/wide port configuration
* in the sas_topology
*/
static void
mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
{
struct mptsas_portinfo_details * port_details;
struct mptsas_phyinfo *phy_info, *phy_info_cmp;
u64 sas_address;
int i, j;
mutex_lock(&ioc->sas_topology_mutex);
phy_info = port_info->phy_info;
for (i = 0 ; i < port_info->num_phys ; i++, phy_info++) {
if (phy_info->attached.handle)
continue;
port_details = phy_info->port_details;
if (!port_details)
continue;
if (port_details->num_phys < 2)
continue;
/*
* Removing a phy from a port, letting the last
* phy be removed by firmware events.
*/
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: [%p]: deleting phy = %d\n",
ioc->name, __func__, port_details, i));
port_details->num_phys--;
port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
if (phy_info->phy) {
devtprintk(ioc, dev_printk(KERN_DEBUG,
&phy_info->phy->dev, MYIOC_s_FMT
"delete phy %d, phy-obj (0x%p)\n", ioc->name,
phy_info->phy_id, phy_info->phy));
sas_port_delete_phy(port_details->port, phy_info->phy);
}
phy_info->port_details = NULL;
}
/*
* Populate and refresh the tree
*/
phy_info = port_info->phy_info;
for (i = 0 ; i < port_info->num_phys ; i++, phy_info++) {
sas_address = phy_info->attached.sas_address;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "phy_id=%d sas_address=0x%018llX\n",
ioc->name, i, (unsigned long long)sas_address));
if (!sas_address)
continue;
port_details = phy_info->port_details;
/*
* Forming a port
*/
if (!port_details) {
port_details = kzalloc(sizeof(struct
mptsas_portinfo_details), GFP_KERNEL);
if (!port_details)
goto out;
port_details->num_phys = 1;
port_details->port_info = port_info;
if (phy_info->phy_id < 64 )
port_details->phy_bitmask |=
(1 << phy_info->phy_id);
phy_info->sas_port_add_phy=1;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tForming port\n\t\t"
"phy_id=%d sas_address=0x%018llX\n",
ioc->name, i, (unsigned long long)sas_address));
phy_info->port_details = port_details;
}
if (i == port_info->num_phys - 1)
continue;
phy_info_cmp = &port_info->phy_info[i + 1];
for (j = i + 1 ; j < port_info->num_phys ; j++,
phy_info_cmp++) {
if (!phy_info_cmp->attached.sas_address)
continue;
if (sas_address != phy_info_cmp->attached.sas_address)
continue;
if (phy_info_cmp->port_details == port_details )
continue;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"\t\tphy_id=%d sas_address=0x%018llX\n",
ioc->name, j, (unsigned long long)
phy_info_cmp->attached.sas_address));
if (phy_info_cmp->port_details) {
port_details->rphy =
mptsas_get_rphy(phy_info_cmp);
port_details->port =
mptsas_get_port(phy_info_cmp);
port_details->starget =
mptsas_get_starget(phy_info_cmp);
port_details->num_phys =
phy_info_cmp->port_details->num_phys;
if (!phy_info_cmp->port_details->num_phys)
kfree(phy_info_cmp->port_details);
} else
phy_info_cmp->sas_port_add_phy=1;
/*
* Adding a phy to a port
*/
phy_info_cmp->port_details = port_details;
if (phy_info_cmp->phy_id < 64 )
port_details->phy_bitmask |=
(1 << phy_info_cmp->phy_id);
port_details->num_phys++;
}
}
out:
for (i = 0; i < port_info->num_phys; i++) {
port_details = port_info->phy_info[i].port_details;
if (!port_details)
continue;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: [%p]: phy_id=%02d num_phys=%02d "
"bitmask=0x%016llX\n", ioc->name, __func__,
port_details, i, port_details->num_phys,
(unsigned long long)port_details->phy_bitmask));
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tport = %p rphy=%p\n",
ioc->name, port_details->port, port_details->rphy));
}
dsaswideprintk(ioc, printk("\n"));
mutex_unlock(&ioc->sas_topology_mutex);
}
/**
* mptsas_find_vtarget - find the VirtTarget for a fw mapped channel/id
*
* @ioc: Pointer to MPT_ADAPTER structure
* @channel: fw mapped channel
* @id: fw mapped id
*
**/
static VirtTarget *
mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
struct scsi_device *sdev;
VirtDevice *vdevice;
VirtTarget *vtarget = NULL;
shost_for_each_device(sdev, ioc->sh) {
vdevice = sdev->hostdata;
if ((vdevice == NULL) ||
(vdevice->vtarget == NULL))
continue;
if ((vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT ||
vdevice->vtarget->raidVolume))
continue;
if (vdevice->vtarget->id == id &&
vdevice->vtarget->channel == channel)
vtarget = vdevice->vtarget;
}
return vtarget;
}
static void
mptsas_queue_device_delete(MPT_ADAPTER *ioc,
MpiEventDataSasDeviceStatusChange_t *sas_event_data)
{
struct fw_event_work *fw_event;
int sz;
sz = offsetof(struct fw_event_work, event_data) +
sizeof(MpiEventDataSasDeviceStatusChange_t);
fw_event = kzalloc(sz, GFP_ATOMIC);
if (!fw_event) {
printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
ioc->name, __func__, __LINE__);
return;
}
memcpy(fw_event->event_data, sas_event_data,
sizeof(MpiEventDataSasDeviceStatusChange_t));
fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE;
fw_event->ioc = ioc;
mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
}
static void
mptsas_queue_rescan(MPT_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
int sz;
sz = offsetof(struct fw_event_work, event_data);
fw_event = kzalloc(sz, GFP_ATOMIC);
if (!fw_event) {
printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
ioc->name, __func__, __LINE__);
return;
}
fw_event->event = -1;
fw_event->ioc = ioc;
mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
}
/**
* mptsas_target_reset
*
* Issues a TARGET_RESET to the end device using the handshaking method.
*
* @ioc
* @channel
* @id
*
* Returns (1) success
* (0) failure
*
**/
static int
mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
MPT_FRAME_HDR *mf;
SCSITaskMgmt_t *pScsiTm;
if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
return 0;
mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
if (mf == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
"%s, no msg frames @%d!!\n", ioc->name,
__func__, __LINE__));
goto out_fail;
}
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
ioc->name, mf));
/* Format the Request
*/
pScsiTm = (SCSITaskMgmt_t *) mf;
memset (pScsiTm, 0, sizeof(SCSITaskMgmt_t));
pScsiTm->TargetID = id;
pScsiTm->Bus = channel;
pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n",
ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id));
mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
return 1;
out_fail:
mpt_clear_taskmgmt_in_progress_flag(ioc);
return 0;
}
static void
mptsas_block_io_sdev(struct scsi_device *sdev, void *data)
{
scsi_device_set_state(sdev, SDEV_BLOCK);
}
static void
mptsas_block_io_starget(struct scsi_target *starget)
{
if (starget)
starget_for_each_device(starget, NULL, mptsas_block_io_sdev);
}
/**
* mptsas_target_reset_queue
*
* Receive a request for TARGET_RESET after receiving a firmware
* NOT_RESPONDING_EVENT, then put the command on a linked list
* and queue it if the task queue is already in use.
*
* @ioc
* @sas_event_data
*
**/
static void
mptsas_target_reset_queue(MPT_ADAPTER *ioc,
EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data)
{
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
VirtTarget *vtarget = NULL;
struct mptsas_target_reset_event *target_reset_list;
u8 id, channel;
id = sas_event_data->TargetID;
channel = sas_event_data->Bus;
vtarget = mptsas_find_vtarget(ioc, channel, id);
if (vtarget) {
mptsas_block_io_starget(vtarget->starget);
vtarget->deleted = 1; /* block IO */
}
target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
GFP_ATOMIC);
if (!target_reset_list) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
"%s, failed to allocate mem @%d..!!\n",
ioc->name, __func__, __LINE__));
return;
}
memcpy(&target_reset_list->sas_event_data, sas_event_data,
sizeof(*sas_event_data));
list_add_tail(&target_reset_list->list, &hd->target_reset_list);
target_reset_list->time_count = jiffies;
if (mptsas_target_reset(ioc, channel, id)) {
target_reset_list->target_reset_issued = 1;
}
}
/**
* mptsas_schedule_target_reset - send pending target reset
* @iocp: per adapter object
*
* This function deletes the scheduled target reset from the list and
* tries to send the next target reset. It is called from the completion
* context of any task management command.
*/
void
mptsas_schedule_target_reset(void *iocp)
{
MPT_ADAPTER *ioc = (MPT_ADAPTER *)(iocp);
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
struct list_head *head = &hd->target_reset_list;
struct mptsas_target_reset_event *target_reset_list;
u8 id, channel;
/*
* issue target reset to next device in the queue
*/
head = &hd->target_reset_list;
if (list_empty(head))
return;
target_reset_list = list_entry(head->next,
struct mptsas_target_reset_event, list);
id = target_reset_list->sas_event_data.TargetID;
channel = target_reset_list->sas_event_data.Bus;
target_reset_list->time_count = jiffies;
if (mptsas_target_reset(ioc, channel, id))
target_reset_list->target_reset_issued = 1;
return;
}
/**
* mptsas_taskmgmt_complete - complete SAS task management function
* @ioc: Pointer to MPT_ADAPTER structure
*
* Completion for TARGET_RESET after NOT_RESPONDING_EVENT: enable the work
* queue to finish removing the device from the upper layers, then send the
* next TARGET_RESET in the queue.
**/
static int
mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
{
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
struct list_head *head = &hd->target_reset_list;
u8 id, channel;
struct mptsas_target_reset_event *target_reset_list;
SCSITaskMgmtReply_t *pScsiTmReply;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: "
"(mf = %p, mr = %p)\n", ioc->name, mf, mr));
pScsiTmReply = (SCSITaskMgmtReply_t *)mr;
if (pScsiTmReply) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n"
"\ttask_type = 0x%02X, iocstatus = 0x%04X "
"loginfo = 0x%08X,\n\tresponse_code = 0x%02X, "
"term_cmnds = %d\n", ioc->name,
pScsiTmReply->Bus, pScsiTmReply->TargetID,
pScsiTmReply->TaskType,
le16_to_cpu(pScsiTmReply->IOCStatus),
le32_to_cpu(pScsiTmReply->IOCLogInfo),
pScsiTmReply->ResponseCode,
le32_to_cpu(pScsiTmReply->TerminationCount)));
if (pScsiTmReply->ResponseCode)
mptscsih_taskmgmt_response_code(ioc,
pScsiTmReply->ResponseCode);
}
if (pScsiTmReply && (pScsiTmReply->TaskType ==
MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType ==
MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) {
ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
memcpy(ioc->taskmgmt_cmds.reply, mr,
min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
complete(&ioc->taskmgmt_cmds.done);
return 1;
}
return 0;
}
mpt_clear_taskmgmt_in_progress_flag(ioc);
if (list_empty(head))
return 1;
target_reset_list = list_entry(head->next,
struct mptsas_target_reset_event, list);
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt: completed (%d seconds)\n",
ioc->name, jiffies_to_msecs(jiffies -
target_reset_list->time_count)/1000));
id = pScsiTmReply->TargetID;
channel = pScsiTmReply->Bus;
target_reset_list->time_count = jiffies;
/*
* retry target reset
*/
if (!target_reset_list->target_reset_issued) {
if (mptsas_target_reset(ioc, channel, id))
target_reset_list->target_reset_issued = 1;
return 1;
}
/*
* enable work queue to remove device from upper layers
*/
list_del(&target_reset_list->list);
if (!ioc->fw_events_off)
mptsas_queue_device_delete(ioc,
&target_reset_list->sas_event_data);
ioc->schedule_target_reset(ioc);
return 1;
}
/**
* mptsas_ioc_reset
*
* @ioc
* @reset_phase
*
**/
static int
mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
MPT_SCSI_HOST *hd;
int rc;
rc = mptscsih_ioc_reset(ioc, reset_phase);
if ((ioc->bus_type != SAS) || (!rc))
return rc;
hd = shost_priv(ioc->sh);
if (!hd->ioc)
goto out;
switch (reset_phase) {
case MPT_IOC_SETUP_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
mptsas_fw_event_off(ioc);
break;
case MPT_IOC_PRE_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
break;
case MPT_IOC_POST_RESET:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
ioc->sas_mgmt.status |= MPT_MGMT_STATUS_DID_IOCRESET;
complete(&ioc->sas_mgmt.done);
}
mptsas_cleanup_fw_event_q(ioc);
mptsas_queue_rescan(ioc);
break;
default:
break;
}
out:
return rc;
}
/**
* enum device_state - result of the TEST UNIT READY check on a new device
* @DEVICE_RETRY: need to retry the TUR
* @DEVICE_ERROR: TUR returned an error, don't add the device
* @DEVICE_READY: device can be added
*
*/
enum device_state{
DEVICE_RETRY,
DEVICE_ERROR,
DEVICE_READY,
};
static int
mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
u32 form, u32 form_specific)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasEnclosurePage0_t *buffer;
dma_addr_t dma_handle;
int error;
__le64 le_identifier;
memset(&hdr, 0, sizeof(hdr));
hdr.PageVersion = MPI_SASENCLOSURE0_PAGEVERSION;
hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_ENCLOSURE;
cfg.cfghdr.ehdr = &hdr;
cfg.physAddr = -1;
cfg.pageAddr = form + form_specific;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
&dma_handle);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error)
goto out_free_consistent;
/* save config data */
memcpy(&le_identifier, &buffer->EnclosureLogicalID, sizeof(__le64));
enclosure->enclosure_logical_id = le64_to_cpu(le_identifier);
enclosure->enclosure_handle = le16_to_cpu(buffer->EnclosureHandle);
enclosure->flags = le16_to_cpu(buffer->Flags);
enclosure->num_slot = le16_to_cpu(buffer->NumSlots);
enclosure->start_slot = le16_to_cpu(buffer->StartSlot);
enclosure->start_id = buffer->StartTargetID;
enclosure->start_channel = buffer->StartBus;
enclosure->sep_id = buffer->SEPTargetID;
enclosure->sep_channel = buffer->SEPBus;
out_free_consistent:
pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
buffer, dma_handle);
out:
return error;
}
/**
* mptsas_add_end_device - report a new end device to sas transport layer
* @ioc: Pointer to MPT_ADAPTER structure
* @phy_info: describes attached device
*
* Returns 0 on success, a non-zero failure code otherwise.
*
**/
static int
mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
{
struct sas_rphy *rphy;
struct sas_port *port;
struct sas_identify identify;
char *ds = NULL;
u8 fw_id;
if (!phy_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__func__, __LINE__));
return 1;
}
fw_id = phy_info->attached.id;
if (mptsas_get_rphy(phy_info)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
return 2;
}
port = mptsas_get_port(phy_info);
if (!port) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
return 3;
}
if (phy_info->attached.device_info &
MPI_SAS_DEVICE_INFO_SSP_TARGET)
ds = "ssp";
if (phy_info->attached.device_info &
MPI_SAS_DEVICE_INFO_STP_TARGET)
ds = "stp";
if (phy_info->attached.device_info &
MPI_SAS_DEVICE_INFO_SATA_DEVICE)
ds = "sata";
printk(MYIOC_s_INFO_FMT "attaching %s device: fw_channel %d, fw_id %d,"
" phy %d, sas_addr 0x%llx\n", ioc->name, ds,
phy_info->attached.channel, phy_info->attached.id,
phy_info->attached.phy_id, (unsigned long long)
phy_info->attached.sas_address);
mptsas_parse_device_info(&identify, &phy_info->attached);
rphy = sas_end_device_alloc(port);
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
return 5; /* non-fatal: an rphy can be added later */
}
rphy->identify = identify;
if (sas_rphy_add(rphy)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
sas_rphy_free(rphy);
return 6;
}
mptsas_set_rphy(ioc, phy_info, rphy);
return 0;
}
/**
* mptsas_del_end_device - report a deleted end device to sas transport layer
* @ioc: Pointer to MPT_ADAPTER structure
* @phy_info: describes attached device
*
**/
static void
mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
{
struct sas_rphy *rphy;
struct sas_port *port;
struct mptsas_portinfo *port_info;
struct mptsas_phyinfo *phy_info_parent;
int i;
char *ds = NULL;
u8 fw_id;
u64 sas_address;
if (!phy_info)
return;
fw_id = phy_info->attached.id;
sas_address = phy_info->attached.sas_address;
if (!phy_info->port_details) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
return;
}
rphy = mptsas_get_rphy(phy_info);
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
return;
}
if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_INITIATOR
|| phy_info->attached.device_info
& MPI_SAS_DEVICE_INFO_SMP_INITIATOR
|| phy_info->attached.device_info
& MPI_SAS_DEVICE_INFO_STP_INITIATOR)
ds = "initiator";
if (phy_info->attached.device_info &
MPI_SAS_DEVICE_INFO_SSP_TARGET)
ds = "ssp";
if (phy_info->attached.device_info &
MPI_SAS_DEVICE_INFO_STP_TARGET)
ds = "stp";
if (phy_info->attached.device_info &
MPI_SAS_DEVICE_INFO_SATA_DEVICE)
ds = "sata";
dev_printk(KERN_DEBUG, &rphy->dev, MYIOC_s_FMT
"removing %s device: fw_channel %d, fw_id %d, phy %d,"
"sas_addr 0x%llx\n", ioc->name, ds, phy_info->attached.channel,
phy_info->attached.id, phy_info->attached.phy_id,
(unsigned long long) sas_address);
port = mptsas_get_port(phy_info);
if (!port) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, fw_id, __LINE__));
return;
}
port_info = phy_info->portinfo;
phy_info_parent = port_info->phy_info;
for (i = 0; i < port_info->num_phys; i++, phy_info_parent++) {
if (!phy_info_parent->phy)
continue;
if (phy_info_parent->attached.sas_address !=
sas_address)
continue;
dev_printk(KERN_DEBUG, &phy_info_parent->phy->dev,
MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n",
ioc->name, phy_info_parent->phy_id,
phy_info_parent->phy);
sas_port_delete_phy(port, phy_info_parent->phy);
}
dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
"delete port %d, sas_addr (0x%llx)\n", ioc->name,
port->port_identifier, (unsigned long long)sas_address);
sas_port_delete(port);
mptsas_set_port(ioc, phy_info, NULL);
mptsas_port_delete(ioc, phy_info->port_details);
}
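/**
* mptsas_refreshing_device_handles - refresh cached firmware handles
* @ioc: Pointer to MPT_ADAPTER structure
* @sas_device: sas device page 0 information for the device
*
* Looks up the phy_info entry matching the device's sas address and,
* while holding the sas topology mutex, copies the current firmware
* channel, id and handles into every phy that points at the device.
*
* Returns the matching phy_info, or NULL if the device is unknown.
**/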
struct mptsas_phyinfo *
mptsas_refreshing_device_handles(MPT_ADAPTER *ioc,
struct mptsas_devinfo *sas_device)
{
struct mptsas_phyinfo *phy_info;
struct mptsas_portinfo *port_info;
int i;
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
sas_device->sas_address);
if (!phy_info)
goto out;
port_info = phy_info->portinfo;
if (!port_info)
goto out;
mutex_lock(&ioc->sas_topology_mutex);
for (i = 0; i < port_info->num_phys; i++) {
if (port_info->phy_info[i].attached.sas_address !=
sas_device->sas_address)
continue;
port_info->phy_info[i].attached.channel = sas_device->channel;
port_info->phy_info[i].attached.id = sas_device->id;
port_info->phy_info[i].attached.sas_address =
sas_device->sas_address;
port_info->phy_info[i].attached.handle = sas_device->handle;
port_info->phy_info[i].attached.handle_parent =
sas_device->handle_parent;
port_info->phy_info[i].attached.handle_enclosure =
sas_device->handle_enclosure;
}
mutex_unlock(&ioc->sas_topology_mutex);
out:
return phy_info;
}
/**
* mptsas_firmware_event_work - work thread for processing fw events
* @work: work queue payload containing info describing the event
* Context: user
*
*/
static void
mptsas_firmware_event_work(struct work_struct *work)
{
struct fw_event_work *fw_event =
container_of(work, struct fw_event_work, work.work);
MPT_ADAPTER *ioc = fw_event->ioc;
/* special rescan topology handling */
if (fw_event->event == -1) {
if (ioc->in_rescan) {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: rescan ignored as it is in progress\n",
ioc->name, __func__));
return;
}
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rescan after "
"reset\n", ioc->name, __func__));
ioc->in_rescan = 1;
mptsas_not_responding_devices(ioc);
mptsas_scan_sas_topology(ioc);
ioc->in_rescan = 0;
mptsas_free_fw_event(ioc, fw_event);
mptsas_fw_event_on(ioc);
return;
}
/* events handling turned off during host reset */
if (ioc->fw_events_off) {
mptsas_free_fw_event(ioc, fw_event);
return;
}
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_event=(0x%p), "
"event = (0x%02x)\n", ioc->name, __func__, fw_event,
(fw_event->event & 0xFF)));
switch (fw_event->event) {
case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
mptsas_send_sas_event(fw_event);
break;
case MPI_EVENT_INTEGRATED_RAID:
mptsas_send_raid_event(fw_event);
break;
case MPI_EVENT_IR2:
mptsas_send_ir2_event(fw_event);
break;
case MPI_EVENT_PERSISTENT_TABLE_FULL:
mptbase_sas_persist_operation(ioc,
MPI_SAS_OP_CLEAR_NOT_PRESENT);
mptsas_free_fw_event(ioc, fw_event);
break;
case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
mptsas_broadcast_primative_work(fw_event);
break;
case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
mptsas_send_expander_event(fw_event);
break;
case MPI_EVENT_SAS_PHY_LINK_STATUS:
mptsas_send_link_status_event(fw_event);
break;
case MPI_EVENT_QUEUE_FULL:
mptsas_handle_queue_full_event(fw_event);
break;
}
}
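/**
* mptsas_slave_configure - scsi host template slave_configure entry
* @sdev: scsi device being configured
*
* Clears a stale deleted flag on the target, registers the device
* component with the adapter (skipping the sas mode pages for RAID
* volumes on MPTSAS_RAID_CHANNEL) and hands off to
* mptscsih_slave_configure().
**/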
static int
mptsas_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = shost_priv(host);
MPT_ADAPTER *ioc = hd->ioc;
VirtDevice *vdevice = sdev->hostdata;
if (vdevice->vtarget->deleted) {
sdev_printk(KERN_INFO, sdev, "clearing deleted flag\n");
vdevice->vtarget->deleted = 0;
}
/*
* RAID volumes are placed beyond the last expected port.
* Skip sending the sas mode pages in that case.
*/
if (sdev->channel == MPTSAS_RAID_CHANNEL) {
mptsas_add_device_component_starget_ir(ioc, scsi_target(sdev));
goto out;
}
sas_read_port_mode_page(sdev);
mptsas_add_device_component_starget(ioc, scsi_target(sdev));
out:
return mptscsih_slave_configure(sdev);
}
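/**
* mptsas_target_alloc - scsi host template target_alloc entry
* @starget: scsi target being allocated
*
* Allocates a VirtTarget, then resolves the firmware id/channel either
* from IOC page 2 (RAID volumes on MPTSAS_RAID_CHANNEL) or from the sas
* topology via the target's rphy. Hidden raid components are remapped
* to their physical disk number.
*
* Returns 0 on success, -ENOMEM or -ENXIO on failure.
**/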
static int
mptsas_target_alloc(struct scsi_target *starget)
{
struct Scsi_Host *host = dev_to_shost(&starget->dev);
MPT_SCSI_HOST *hd = shost_priv(host);
VirtTarget *vtarget;
u8 id, channel;
struct sas_rphy *rphy;
struct mptsas_portinfo *p;
int i;
MPT_ADAPTER *ioc = hd->ioc;
vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL);
if (!vtarget)
return -ENOMEM;
vtarget->starget = starget;
vtarget->ioc_id = ioc->id;
vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
id = starget->id;
channel = 0;
/*
* RAID volumes are placed beyond the last expected port.
*/
if (starget->channel == MPTSAS_RAID_CHANNEL) {
if (!ioc->raid_data.pIocPg2) {
kfree(vtarget);
return -ENXIO;
}
for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
if (id == ioc->raid_data.pIocPg2->
RaidVolume[i].VolumeID) {
channel = ioc->raid_data.pIocPg2->
RaidVolume[i].VolumeBus;
}
}
vtarget->raidVolume = 1;
goto out;
}
rphy = dev_to_rphy(starget->dev.parent);
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(p, &ioc->sas_topology, list) {
for (i = 0; i < p->num_phys; i++) {
if (p->phy_info[i].attached.sas_address !=
rphy->identify.sas_address)
continue;
id = p->phy_info[i].attached.id;
channel = p->phy_info[i].attached.channel;
mptsas_set_starget(&p->phy_info[i], starget);
/*
* Exposing hidden raid components
*/
if (mptscsih_is_phys_disk(ioc, channel, id)) {
id = mptscsih_raid_id_to_num(ioc,
channel, id);
vtarget->tflags |=
MPT_TARGET_FLAGS_RAID_COMPONENT;
p->phy_info[i].attached.phys_disk_num = id;
}
mutex_unlock(&ioc->sas_topology_mutex);
goto out;
}
}
mutex_unlock(&ioc->sas_topology_mutex);
kfree(vtarget);
return -ENXIO;
out:
vtarget->id = id;
vtarget->channel = channel;
starget->hostdata = vtarget;
return 0;
}
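/**
* mptsas_target_destroy - scsi host template target_destroy entry
* @starget: scsi target being destroyed
*
* Removes the device component by its os mapping, clears the cached
* starget pointer in the matching phy_info entries and frees the
* VirtTarget allocated in mptsas_target_alloc().
**/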
static void
mptsas_target_destroy(struct scsi_target *starget)
{
struct Scsi_Host *host = dev_to_shost(&starget->dev);
MPT_SCSI_HOST *hd = shost_priv(host);
struct sas_rphy *rphy;
struct mptsas_portinfo *p;
int i;
MPT_ADAPTER *ioc = hd->ioc;
VirtTarget *vtarget;
if (!starget->hostdata)
return;
vtarget = starget->hostdata;
mptsas_del_device_component_by_os(ioc, starget->channel,
starget->id);
if (starget->channel == MPTSAS_RAID_CHANNEL)
goto out;
rphy = dev_to_rphy(starget->dev.parent);
list_for_each_entry(p, &ioc->sas_topology, list) {
for (i = 0; i < p->num_phys; i++) {
if (p->phy_info[i].attached.sas_address !=
rphy->identify.sas_address)
continue;
starget_printk(KERN_INFO, starget, MYIOC_s_FMT
"delete device: fw_channel %d, fw_id %d, phy %d, "
"sas_addr 0x%llx\n", ioc->name,
p->phy_info[i].attached.channel,
p->phy_info[i].attached.id,
p->phy_info[i].attached.phy_id, (unsigned long long)
p->phy_info[i].attached.sas_address);
mptsas_set_starget(&p->phy_info[i], NULL);
}
}
out:
vtarget->starget = NULL;
kfree(starget->hostdata);
starget->hostdata = NULL;
}
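/**
* mptsas_slave_alloc - scsi host template slave_alloc entry
* @sdev: scsi device being allocated
*
* Allocates a VirtDevice, links it to the target's VirtTarget and, for
* non-RAID channels, matches the device against the sas topology;
* hidden raid components get no_uld_attach set so no upper-level
* driver binds to them.
*
* Returns 0 on success, -ENOMEM or -ENXIO on failure.
**/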
static int
mptsas_slave_alloc(struct scsi_device *sdev)
{
struct Scsi_Host *host = sdev->host;
MPT_SCSI_HOST *hd = shost_priv(host);
struct sas_rphy *rphy;
struct mptsas_portinfo *p;
VirtDevice *vdevice;
struct scsi_target *starget;
int i;
MPT_ADAPTER *ioc = hd->ioc;
vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
if (!vdevice) {
printk(MYIOC_s_ERR_FMT "slave_alloc kzalloc(%zd) FAILED!\n",
ioc->name, sizeof(VirtDevice));
return -ENOMEM;
}
starget = scsi_target(sdev);
vdevice->vtarget = starget->hostdata;
if (sdev->channel == MPTSAS_RAID_CHANNEL)
goto out;
rphy = dev_to_rphy(sdev->sdev_target->dev.parent);
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(p, &ioc->sas_topology, list) {
for (i = 0; i < p->num_phys; i++) {
if (p->phy_info[i].attached.sas_address !=
rphy->identify.sas_address)
continue;
vdevice->lun = sdev->lun;
/*
* Exposing hidden raid components
*/
if (mptscsih_is_phys_disk(ioc,
p->phy_info[i].attached.channel,
p->phy_info[i].attached.id))
sdev->no_uld_attach = 1;
mutex_unlock(&ioc->sas_topology_mutex);
goto out;
}
}
mutex_unlock(&ioc->sas_topology_mutex);
kfree(vdevice);
return -ENXIO;
out:
vdevice->vtarget->num_luns++;
sdev->hostdata = vdevice;
return 0;
}
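/**
* mptsas_qcmd_lck - scsi host template queuecommand entry
* @SCpnt: scsi command to be queued
* @done: midlayer completion callback
*
* Fails commands for deleted targets with DID_NO_CONNECT, returns
* SCSI_MLQUEUE_HOST_BUSY while sas discovery has quiesced I/O and
* otherwise passes the command to mptscsih_qcmd().
**/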
static int
mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
VirtDevice *vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
SCpnt->result = DID_NO_CONNECT << 16;
done(SCpnt);
return 0;
}
hd = shost_priv(SCpnt->device->host);
ioc = hd->ioc;
if (ioc->sas_discovery_quiesce_io)
return SCSI_MLQUEUE_HOST_BUSY;
if (ioc->debug_level & MPT_DEBUG_SCSI)
scsi_print_command(SCpnt);
return mptscsih_qcmd(SCpnt,done);
}
static DEF_SCSI_QCMD(mptsas_qcmd)
/**
* mptsas_eh_timed_out - resets the scsi_cmnd timeout
* if the device in question is currently in the
* device removal delay.
* @sc: scsi command that the midlayer is about to time out
*
**/
static enum blk_eh_timer_return mptsas_eh_timed_out(struct scsi_cmnd *sc)
{
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
VirtDevice *vdevice;
enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
hd = shost_priv(sc->device->host);
if (hd == NULL) {
printk(KERN_ERR MYNAM ": %s: Can't locate host! (sc=%p)\n",
__func__, sc);
goto done;
}
ioc = hd->ioc;
if (ioc->bus_type != SAS) {
printk(KERN_ERR MYNAM ": %s: Wrong bus type (sc=%p)\n",
__func__, sc);
goto done;
}
/* If the IOC is in reset from an internal context,
* do not run error handling for this IOC; the SCSI midlayer
* should reset the timer instead.
*/
if (ioc->ioc_reset_in_progress) {
dtmprintk(ioc, printk(MYIOC_s_WARN_FMT ": %s: ioc is in reset, "
"SML needs to reset the timer (sc=%p)\n",
ioc->name, __func__, sc));
rc = BLK_EH_RESET_TIMER;
}
vdevice = sc->device->hostdata;
if (vdevice && vdevice->vtarget && (vdevice->vtarget->inDMD
|| vdevice->vtarget->deleted)) {
dtmprintk(ioc, printk(MYIOC_s_WARN_FMT ": %s: target removed "
"or in device removal delay (sc=%p)\n",
ioc->name, __func__, sc));
rc = BLK_EH_RESET_TIMER;
goto done;
}
done:
return rc;
}
static struct scsi_host_template mptsas_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptsas",
.proc_info = mptscsih_proc_info,
.name = "MPT SAS Host",
.info = mptscsih_info,
.queuecommand = mptsas_qcmd,
.target_alloc = mptsas_target_alloc,
.slave_alloc = mptsas_slave_alloc,
.slave_configure = mptsas_slave_configure,
.target_destroy = mptsas_target_destroy,
.slave_destroy = mptscsih_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
.eh_device_reset_handler = mptscsih_dev_reset,
.eh_host_reset_handler = mptscsih_host_reset,
.bios_param = mptscsih_bios_param,
.can_queue = MPT_SAS_CAN_QUEUE,
.this_id = -1,
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
};
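/**
* mptsas_get_linkerrors - sas transport get_linkerrors entry
* @phy: phy whose error counters are requested
*
* Reads SAS PHY page 1 for a local phy and copies the invalid dword,
* running disparity, loss of dword sync and phy reset problem counters
* into the sas_phy object.
**/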
static int mptsas_get_linkerrors(struct sas_phy *phy)
{
MPT_ADAPTER *ioc = phy_to_ioc(phy);
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasPhyPage1_t *buffer;
dma_addr_t dma_handle;
int error;
/* FIXME: only have link errors on local phys */
if (!scsi_is_sas_phy_local(phy))
return -EINVAL;
hdr.PageVersion = MPI_SASPHY1_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 1 /* page number 1*/;
hdr.Reserved1 = 0;
hdr.Reserved2 = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
cfg.cfghdr.ehdr = &hdr;
cfg.physAddr = -1;
cfg.pageAddr = phy->identify.phy_identifier;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
error = mpt_config(ioc, &cfg);
if (error)
return error;
if (!hdr.ExtPageLength)
return -ENXIO;
buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
&dma_handle);
if (!buffer)
return -ENOMEM;
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error)
goto out_free_consistent;
mptsas_print_phy_pg1(ioc, buffer);
phy->invalid_dword_count = le32_to_cpu(buffer->InvalidDwordCount);
phy->running_disparity_error_count =
le32_to_cpu(buffer->RunningDisparityErrorCount);
phy->loss_of_dword_sync_count =
le32_to_cpu(buffer->LossDwordSynchCount);
phy->phy_reset_problem_count =
le32_to_cpu(buffer->PhyResetProblemCount);
out_free_consistent:
pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
buffer, dma_handle);
return error;
}
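/**
* mptsas_mgmt_done - callback for SAS management request frames
* @ioc: Pointer to MPT_ADAPTER structure
* @req: original request frame
* @reply: reply frame from the firmware, may be NULL
*
* Saves the reply into ioc->sas_mgmt.reply and completes the waiter
* if a request is still pending.
**/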
static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
MPT_FRAME_HDR *reply)
{
ioc->sas_mgmt.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
if (reply != NULL) {
ioc->sas_mgmt.status |= MPT_MGMT_STATUS_RF_VALID;
memcpy(ioc->sas_mgmt.reply, reply,
min(ioc->reply_sz, 4 * reply->u.reply.MsgLength));
}
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
ioc->sas_mgmt.status &= ~MPT_MGMT_STATUS_PENDING;
complete(&ioc->sas_mgmt.done);
return 1;
}
return 0;
}
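/**
* mptsas_phy_reset - sas transport phy_reset entry
* @phy: local phy to reset
* @hard_reset: non-zero for a hard reset, zero for a link reset
*
* Issues a SAS IO UNIT CONTROL request for the phy and waits up to ten
* seconds for the reply; a timeout triggers a soft/hard IOC reset.
**/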
static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
{
MPT_ADAPTER *ioc = phy_to_ioc(phy);
SasIoUnitControlRequest_t *req;
SasIoUnitControlReply_t *reply;
MPT_FRAME_HDR *mf;
MPIHeader_t *hdr;
unsigned long timeleft;
int error = -ERESTARTSYS;
/* FIXME: fusion doesn't allow non-local phy reset */
if (!scsi_is_sas_phy_local(phy))
return -EINVAL;
/* not implemented for expanders */
if (phy->identify.target_port_protocols & SAS_PROTOCOL_SMP)
return -ENXIO;
if (mutex_lock_interruptible(&ioc->sas_mgmt.mutex))
goto out;
mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
if (!mf) {
error = -ENOMEM;
goto out_unlock;
}
hdr = (MPIHeader_t *) mf;
req = (SasIoUnitControlRequest_t *)mf;
memset(req, 0, sizeof(SasIoUnitControlRequest_t));
req->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
req->MsgContext = hdr->MsgContext;
req->Operation = hard_reset ?
MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET;
req->PhyNum = phy->identify.phy_identifier;
INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
10 * HZ);
if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
error = -ETIME;
mpt_free_msg_frame(ioc, mf);
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out_unlock;
if (!timeleft)
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto out_unlock;
}
/* a reply frame is expected */
if ((ioc->sas_mgmt.status &
MPT_MGMT_STATUS_RF_VALID) == 0) {
error = -ENXIO;
goto out_unlock;
}
/* process the completed Reply Message Frame */
reply = (SasIoUnitControlReply_t *)ioc->sas_mgmt.reply;
if (reply->IOCStatus != MPI_IOCSTATUS_SUCCESS) {
printk(MYIOC_s_INFO_FMT "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
ioc->name, __func__, reply->IOCStatus, reply->IOCLogInfo);
error = -ENXIO;
goto out_unlock;
}
error = 0;
out_unlock:
CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
mutex_unlock(&ioc->sas_mgmt.mutex);
out:
return error;
}
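/**
* mptsas_get_enclosure_identifier - sas transport callback
* @rphy: remote phy of the end device
* @identifier: filled with the enclosure logical id on success
*
* Finds the enclosure handle for the attached device in the sas
* topology and reads SAS enclosure page 0 to obtain the logical id.
**/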
static int
mptsas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
MPT_ADAPTER *ioc = rphy_to_ioc(rphy);
int i, error;
struct mptsas_portinfo *p;
struct mptsas_enclosure enclosure_info;
u64 enclosure_handle;
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(p, &ioc->sas_topology, list) {
for (i = 0; i < p->num_phys; i++) {
if (p->phy_info[i].attached.sas_address ==
rphy->identify.sas_address) {
enclosure_handle = p->phy_info[i].
attached.handle_enclosure;
goto found_info;
}
}
}
mutex_unlock(&ioc->sas_topology_mutex);
return -ENXIO;
found_info:
mutex_unlock(&ioc->sas_topology_mutex);
memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
error = mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
(MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
MPI_SAS_ENCLOS_PGAD_FORM_SHIFT), enclosure_handle);
if (!error)
*identifier = enclosure_info.enclosure_logical_id;
return error;
}
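/**
* mptsas_get_bay_identifier - sas transport callback
* @rphy: remote phy of the end device
*
* Returns the slot number cached for the attached device, or -ENXIO
* if the device is not found in the sas topology.
**/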
static int
mptsas_get_bay_identifier(struct sas_rphy *rphy)
{
MPT_ADAPTER *ioc = rphy_to_ioc(rphy);
struct mptsas_portinfo *p;
int i, rc;
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(p, &ioc->sas_topology, list) {
for (i = 0; i < p->num_phys; i++) {
if (p->phy_info[i].attached.sas_address ==
rphy->identify.sas_address) {
rc = p->phy_info[i].attached.slot;
goto out;
}
}
}
rc = -ENXIO;
out:
mutex_unlock(&ioc->sas_topology_mutex);
return rc;
}
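/**
* mptsas_smp_handler - sas transport smp_handler entry
* @shost: scsi host the request arrived on
* @rphy: expander rphy the SMP request is addressed to, or NULL for
*	the HBA itself
* @req: block layer request carrying the SMP frame and its response
*
* Wraps the SMP frame in an MPI SMP passthrough request, maps the
* request and response buffers for DMA and waits up to ten seconds
* for completion.
**/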
static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct request *req)
{
MPT_ADAPTER *ioc = ((MPT_SCSI_HOST *) shost->hostdata)->ioc;
MPT_FRAME_HDR *mf;
SmpPassthroughRequest_t *smpreq;
struct request *rsp = req->next_rq;
int ret;
int flagsLength;
unsigned long timeleft;
char *psge;
dma_addr_t dma_addr_in = 0;
dma_addr_t dma_addr_out = 0;
u64 sas_address = 0;
if (!rsp) {
printk(MYIOC_s_ERR_FMT "%s: the smp response space is missing\n",
ioc->name, __func__);
return -EINVAL;
}
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
return -EINVAL;
}
ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
if (ret)
goto out;
mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
if (!mf) {
ret = -ENOMEM;
goto out_unlock;
}
smpreq = (SmpPassthroughRequest_t *)mf;
memset(smpreq, 0, sizeof(*smpreq));
smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
if (rphy)
sas_address = rphy->identify.sas_address;
else {
struct mptsas_portinfo *port_info;
mutex_lock(&ioc->sas_topology_mutex);
port_info = ioc->hba_port_info;
if (port_info && port_info->phy_info)
sas_address =
port_info->phy_info[0].phy->identify.sas_address;
mutex_unlock(&ioc->sas_topology_mutex);
}
*((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address);
psge = (char *)
(((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4));
/* request */
flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_DIRECTION)
<< MPI_SGE_FLAGS_SHIFT;
flagsLength |= (blk_rq_bytes(req) - 4);
dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_out)
goto put_mf;
ioc->add_sge(psge, flagsLength, dma_addr_out);
psge += ioc->SGE_size;
/* response */
flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_SYSTEM_ADDRESS |
MPI_SGE_FLAGS_IOC_TO_HOST |
MPI_SGE_FLAGS_END_OF_BUFFER;
flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
flagsLength |= blk_rq_bytes(rsp) + 4;
dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_in)
goto unmap;
ioc->add_sge(psge, flagsLength, dma_addr_in);
INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
ret = -ETIME;
mpt_free_msg_frame(ioc, mf);
mf = NULL;
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto unmap;
if (!timeleft)
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto unmap;
}
mf = NULL;
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
SmpPassthroughReply_t *smprep;
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
memcpy(req->sense, smprep, sizeof(*smprep));
req->sense_len = sizeof(*smprep);
req->resid_len = 0;
rsp->resid_len -= smprep->ResponseDataLength;
} else {
printk(MYIOC_s_ERR_FMT
"%s: smp passthru reply failed to be returned\n",
ioc->name, __func__);
ret = -ENXIO;
}
unmap:
if (dma_addr_out)
pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
PCI_DMA_BIDIRECTIONAL);
if (dma_addr_in)
pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
PCI_DMA_BIDIRECTIONAL);
put_mf:
if (mf)
mpt_free_msg_frame(ioc, mf);
out_unlock:
CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
mutex_unlock(&ioc->sas_mgmt.mutex);
out:
return ret;
}
static struct sas_function_template mptsas_transport_functions = {
.get_linkerrors = mptsas_get_linkerrors,
.get_enclosure_identifier = mptsas_get_enclosure_identifier,
.get_bay_identifier = mptsas_get_bay_identifier,
.phy_reset = mptsas_phy_reset,
.smp_handler = mptsas_smp_handler,
};
static struct scsi_transport_template *mptsas_transport_template;
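/**
* mptsas_sas_io_unit_pg0 - read SAS IO Unit page 0
* @ioc: Pointer to MPT_ADAPTER structure
* @port_info: filled with the number of phys and a freshly allocated
*	phy_info array describing the host adapter phys
*
* Returns 0 on success or a negative errno on failure.
**/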
static int
mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasIOUnitPage0_t *buffer;
dma_addr_t dma_handle;
int error, i;
hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 0;
hdr.Reserved1 = 0;
hdr.Reserved2 = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
cfg.cfghdr.ehdr = &hdr;
cfg.physAddr = -1;
cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
&dma_handle);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error)
goto out_free_consistent;
port_info->num_phys = buffer->NumPhys;
port_info->phy_info = kcalloc(port_info->num_phys,
sizeof(struct mptsas_phyinfo), GFP_KERNEL);
if (!port_info->phy_info) {
error = -ENOMEM;
goto out_free_consistent;
}
ioc->nvdata_version_persistent =
le16_to_cpu(buffer->NvdataVersionPersistent);
ioc->nvdata_version_default =
le16_to_cpu(buffer->NvdataVersionDefault);
for (i = 0; i < port_info->num_phys; i++) {
mptsas_print_phy_data(ioc, &buffer->PhyData[i]);
port_info->phy_info[i].phy_id = i;
port_info->phy_info[i].port_id =
buffer->PhyData[i].Port;
port_info->phy_info[i].negotiated_link_rate =
buffer->PhyData[i].NegotiatedLinkRate;
port_info->phy_info[i].portinfo = port_info;
port_info->phy_info[i].handle =
le16_to_cpu(buffer->PhyData[i].ControllerDevHandle);
}
out_free_consistent:
pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
buffer, dma_handle);
out:
return error;
}
static int
mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasIOUnitPage1_t *buffer;
dma_addr_t dma_handle;
int error;
u8 device_missing_delay;
memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
memset(&cfg, 0, sizeof(CONFIGPARMS));
cfg.cfghdr.ehdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
cfg.cfghdr.ehdr->PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
cfg.cfghdr.ehdr->ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
cfg.cfghdr.ehdr->PageVersion = MPI_SASIOUNITPAGE1_PAGEVERSION;
cfg.cfghdr.ehdr->PageNumber = 1;
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
&dma_handle);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error)
goto out_free_consistent;
ioc->io_missing_delay =
le16_to_cpu(buffer->IODeviceMissingDelay);
device_missing_delay = buffer->ReportDeviceMissingDelay;
ioc->device_missing_delay = (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16) ?
(device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16 :
device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
out_free_consistent:
pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
buffer, dma_handle);
out:
return error;
}
static int
mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
u32 form, u32 form_specific)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasPhyPage0_t *buffer;
dma_addr_t dma_handle;
int error;
hdr.PageVersion = MPI_SASPHY0_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 0;
hdr.Reserved1 = 0;
hdr.Reserved2 = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
cfg.cfghdr.ehdr = &hdr;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
/* Get Phy Pg 0 for each Phy. */
cfg.physAddr = -1;
cfg.pageAddr = form + form_specific;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
&dma_handle);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error)
goto out_free_consistent;
mptsas_print_phy_pg0(ioc, buffer);
phy_info->hw_link_rate = buffer->HwLinkRate;
phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
out_free_consistent:
pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
buffer, dma_handle);
out:
return error;
}
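/**
* mptsas_sas_device_pg0 - read SAS Device page 0
* @ioc: Pointer to MPT_ADAPTER structure
* @device_info: filled with the handles, address and flags of the device
* @form: page address form
* @form_specific: form specific value, typically a device handle
*
* Returns 0 on success, -ENODEV if the page is invalid or a negative
* errno on other failures.
**/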
static int
mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
u32 form, u32 form_specific)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasDevicePage0_t *buffer;
dma_addr_t dma_handle;
__le64 sas_address;
int error=0;
hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 0;
hdr.Reserved1 = 0;
hdr.Reserved2 = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE;
cfg.cfghdr.ehdr = &hdr;
cfg.pageAddr = form + form_specific;
cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
memset(device_info, 0, sizeof(struct mptsas_devinfo));
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
&dma_handle);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
goto out_free_consistent;
}
if (error)
goto out_free_consistent;
mptsas_print_device_pg0(ioc, buffer);
memset(device_info, 0, sizeof(struct mptsas_devinfo));
device_info->handle = le16_to_cpu(buffer->DevHandle);
device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle);
device_info->handle_enclosure =
le16_to_cpu(buffer->EnclosureHandle);
device_info->slot = le16_to_cpu(buffer->Slot);
device_info->phy_id = buffer->PhyNum;
device_info->port_id = buffer->PhysicalPort;
device_info->id = buffer->TargetID;
device_info->phys_disk_num = ~0;
device_info->channel = buffer->Bus;
memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
device_info->sas_address = le64_to_cpu(sas_address);
device_info->device_info =
le32_to_cpu(buffer->DeviceInfo);
device_info->flags = le16_to_cpu(buffer->Flags);
out_free_consistent:
pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
buffer, dma_handle);
out:
return error;
}
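/**
* mptsas_sas_expander_pg0 - read SAS Expander page 0
* @ioc: Pointer to MPT_ADAPTER structure
* @port_info: filled with the expander's phy count and a freshly
*	allocated phy_info array
* @form: page address form
* @form_specific: form specific value, typically the expander handle
*
* Returns 0 on success, -ENODEV if the page is invalid or a negative
* errno on other failures.
**/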
static int
mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
u32 form, u32 form_specific)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasExpanderPage0_t *buffer;
dma_addr_t dma_handle;
int i, error;
__le64 sas_address;
memset(port_info, 0, sizeof(struct mptsas_portinfo));
hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 0;
hdr.Reserved1 = 0;
hdr.Reserved2 = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
cfg.cfghdr.ehdr = &hdr;
cfg.physAddr = -1;
cfg.pageAddr = form + form_specific;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
memset(port_info, 0, sizeof(struct mptsas_portinfo));
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
&dma_handle);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
goto out_free_consistent;
}
if (error)
goto out_free_consistent;
/* save config data */
port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
port_info->phy_info = kcalloc(port_info->num_phys,
sizeof(struct mptsas_phyinfo), GFP_KERNEL);
if (!port_info->phy_info) {
error = -ENOMEM;
goto out_free_consistent;
}
memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
for (i = 0; i < port_info->num_phys; i++) {
port_info->phy_info[i].portinfo = port_info;
port_info->phy_info[i].handle =
le16_to_cpu(buffer->DevHandle);
port_info->phy_info[i].identify.sas_address =
le64_to_cpu(sas_address);
port_info->phy_info[i].identify.handle_parent =
le16_to_cpu(buffer->ParentDevHandle);
}
out_free_consistent:
pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
buffer, dma_handle);
out:
return error;
}
static int
mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
u32 form, u32 form_specific)
{
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasExpanderPage1_t *buffer;
dma_addr_t dma_handle;
int error=0;
hdr.PageVersion = MPI_SASEXPANDER1_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 1;
hdr.Reserved1 = 0;
hdr.Reserved2 = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
cfg.cfghdr.ehdr = &hdr;
cfg.physAddr = -1;
cfg.pageAddr = form + form_specific;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
error = mpt_config(ioc, &cfg);
if (error)
goto out;
if (!hdr.ExtPageLength) {
error = -ENXIO;
goto out;
}
buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
&dma_handle);
if (!buffer) {
error = -ENOMEM;
goto out;
}
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
goto out_free_consistent;
}
if (error)
goto out_free_consistent;
mptsas_print_expander_pg1(ioc, buffer);
/* save config data */
phy_info->phy_id = buffer->PhyIdentifier;
phy_info->port_id = buffer->PhysicalPort;
phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate;
phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
phy_info->hw_link_rate = buffer->HwLinkRate;
phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
out_free_consistent:
pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
buffer, dma_handle);
out:
return error;
}
struct rep_manu_request{
u8 smp_frame_type;
u8 function;
u8 reserved;
u8 request_length;
};
struct rep_manu_reply{
u8 smp_frame_type; /* 0x41 */
u8 function; /* 0x01 */
u8 function_result;
u8 response_length;
u16 expander_change_count;
u8 reserved0[2];
u8 sas_format:1;
u8 reserved1:7;
u8 reserved2[3];
u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
u16 component_id;
u8 component_revision_id;
u8 reserved3;
u8 vendor_specific[8];
};
/**
* mptsas_exp_repmanufacture_info - fill in expander manufacturer info
* @ioc: per adapter object
* @sas_address: expander sas address
* @edev: the sas_expander_device object
*
* Fills in the sas_expander_device object when SMP port is created.
*
* Returns 0 for success, non-zero for failure.
*/
static int
mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
u64 sas_address, struct sas_expander_device *edev)
{
MPT_FRAME_HDR *mf;
SmpPassthroughRequest_t *smpreq;
SmpPassthroughReply_t *smprep;
struct rep_manu_reply *manufacture_reply;
struct rep_manu_request *manufacture_request;
int ret;
int flagsLength;
unsigned long timeleft;
char *psge;
unsigned long flags;
void *data_out = NULL;
dma_addr_t data_out_dma = 0;
u32 sz;
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
printk(MYIOC_s_INFO_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
return -EFAULT;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
if (ret)
goto out;
mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
if (!mf) {
ret = -ENOMEM;
goto out_unlock;
}
smpreq = (SmpPassthroughRequest_t *)mf;
memset(smpreq, 0, sizeof(*smpreq));
sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply);
data_out = pci_alloc_consistent(ioc->pcidev, sz, &data_out_dma);
if (!data_out) {
printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
ret = -ENOMEM;
goto put_mf;
}
manufacture_request = data_out;
manufacture_request->smp_frame_type = 0x40;
manufacture_request->function = 1;
manufacture_request->reserved = 0;
manufacture_request->request_length = 0;
smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
smpreq->PhysicalPort = 0xFF;
*((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address);
smpreq->RequestDataLength = sizeof(struct rep_manu_request);
psge = (char *)
(((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4));
flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_SYSTEM_ADDRESS |
MPI_SGE_FLAGS_HOST_TO_IOC |
MPI_SGE_FLAGS_END_OF_BUFFER;
flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
flagsLength |= sizeof(struct rep_manu_request);
ioc->add_sge(psge, flagsLength, data_out_dma);
psge += ioc->SGE_size;
flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_SYSTEM_ADDRESS |
MPI_SGE_FLAGS_IOC_TO_HOST |
MPI_SGE_FLAGS_END_OF_BUFFER;
flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
flagsLength |= sizeof(struct rep_manu_reply);
ioc->add_sge(psge, flagsLength, data_out_dma +
sizeof(struct rep_manu_request));
INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
ret = -ETIME;
mpt_free_msg_frame(ioc, mf);
mf = NULL;
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out_free;
if (!timeleft)
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto out_free;
}
mf = NULL;
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
u8 *tmp;
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
if (le16_to_cpu(smprep->ResponseDataLength) !=
sizeof(struct rep_manu_reply))
goto out_free;
manufacture_reply = data_out + sizeof(struct rep_manu_request);
strncpy(edev->vendor_id, manufacture_reply->vendor_id,
SAS_EXPANDER_VENDOR_ID_LEN);
strncpy(edev->product_id, manufacture_reply->product_id,
SAS_EXPANDER_PRODUCT_ID_LEN);
strncpy(edev->product_rev, manufacture_reply->product_rev,
SAS_EXPANDER_PRODUCT_REV_LEN);
edev->level = manufacture_reply->sas_format;
if (manufacture_reply->sas_format) {
strncpy(edev->component_vendor_id,
manufacture_reply->component_vendor_id,
SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
tmp = (u8 *)&manufacture_reply->component_id;
edev->component_id = tmp[0] << 8 | tmp[1];
edev->component_revision_id =
manufacture_reply->component_revision_id;
}
} else {
printk(MYIOC_s_ERR_FMT
"%s: smp passthru reply failed to be returned\n",
ioc->name, __func__);
ret = -ENXIO;
}
out_free:
if (data_out_dma)
pci_free_consistent(ioc->pcidev, sz, data_out, data_out_dma);
put_mf:
if (mf)
mpt_free_msg_frame(ioc, mf);
out_unlock:
CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
mutex_unlock(&ioc->sas_mgmt.mutex);
out:
return ret;
}
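/**
* mptsas_parse_device_info - convert device page 0 info for the
*	sas transport layer
* @identify: sas_identify structure to fill in
* @device_info: cached device page 0 data
*
* Translates the MPI device info bits into sas transport initiator
* and target protocol flags and the attached device type.
**/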
static void
mptsas_parse_device_info(struct sas_identify *identify,
struct mptsas_devinfo *device_info)
{
u16 protocols;
identify->sas_address = device_info->sas_address;
identify->phy_identifier = device_info->phy_id;
/*
* Fill in Phy Initiator Port Protocol.
* Bits 6:3, more than one bit can be set, fall through cases.
*/
protocols = device_info->device_info & 0x78;
identify->initiator_port_protocols = 0;
if (protocols & MPI_SAS_DEVICE_INFO_SSP_INITIATOR)
identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
if (protocols & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
if (protocols & MPI_SAS_DEVICE_INFO_SMP_INITIATOR)
identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
if (protocols & MPI_SAS_DEVICE_INFO_SATA_HOST)
identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
/*
* Fill in Phy Target Port Protocol.
* Bits 10:7, more than one bit can be set, fall through cases.
*/
protocols = device_info->device_info & 0x780;
identify->target_port_protocols = 0;
if (protocols & MPI_SAS_DEVICE_INFO_SSP_TARGET)
identify->target_port_protocols |= SAS_PROTOCOL_SSP;
if (protocols & MPI_SAS_DEVICE_INFO_STP_TARGET)
identify->target_port_protocols |= SAS_PROTOCOL_STP;
if (protocols & MPI_SAS_DEVICE_INFO_SMP_TARGET)
identify->target_port_protocols |= SAS_PROTOCOL_SMP;
if (protocols & MPI_SAS_DEVICE_INFO_SATA_DEVICE)
identify->target_port_protocols |= SAS_PROTOCOL_SATA;
/*
* Fill in Attached device type.
*/
switch (device_info->device_info &
MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
case MPI_SAS_DEVICE_INFO_NO_DEVICE:
identify->device_type = SAS_PHY_UNUSED;
break;
case MPI_SAS_DEVICE_INFO_END_DEVICE:
identify->device_type = SAS_END_DEVICE;
break;
case MPI_SAS_DEVICE_INFO_EDGE_EXPANDER:
identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
break;
case MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER:
identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
break;
}
}
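/**
* mptsas_probe_one_phy - register a single phy with the sas transport
* @dev: parent device (host or expander rphy)
* @phy_info: phy to be registered
* @index: phy number used for sas_phy_alloc
* @local: non-zero when the phy belongs to the host adapter
*
* Allocates and adds the sas_phy if needed, translates the negotiated,
* hardware and programmed link rates, attaches the phy to its wide
* port and creates the rphy for directly attached expanders.
**/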
static int mptsas_probe_one_phy(struct device *dev,
struct mptsas_phyinfo *phy_info, int index, int local)
{
MPT_ADAPTER *ioc;
struct sas_phy *phy;
struct sas_port *port;
int error = 0;
VirtTarget *vtarget;
if (!dev) {
error = -ENODEV;
goto out;
}
if (!phy_info->phy) {
phy = sas_phy_alloc(dev, index);
if (!phy) {
error = -ENOMEM;
goto out;
}
} else
phy = phy_info->phy;
mptsas_parse_device_info(&phy->identify, &phy_info->identify);
/*
* Set Negotiated link rate.
*/
switch (phy_info->negotiated_link_rate) {
case MPI_SAS_IOUNIT0_RATE_PHY_DISABLED:
phy->negotiated_linkrate = SAS_PHY_DISABLED;
break;
case MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION:
phy->negotiated_linkrate = SAS_LINK_RATE_FAILED;
break;
case MPI_SAS_IOUNIT0_RATE_1_5:
phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
break;
case MPI_SAS_IOUNIT0_RATE_3_0:
phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
case MPI_SAS_IOUNIT0_RATE_6_0:
phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
break;
case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
default:
phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
break;
}
/*
* Set Max hardware link rate.
*/
switch (phy_info->hw_link_rate & MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
case MPI_SAS_PHY0_HWRATE_MAX_RATE_1_5:
phy->maximum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
break;
case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
break;
default:
break;
}
/*
* Set Max programmed link rate.
*/
switch (phy_info->programmed_link_rate &
MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
case MPI_SAS_PHY0_PRATE_MAX_RATE_1_5:
phy->maximum_linkrate = SAS_LINK_RATE_1_5_GBPS;
break;
case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
phy->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
default:
break;
}
/*
* Set Min hardware link rate.
*/
switch (phy_info->hw_link_rate & MPI_SAS_PHY0_HWRATE_MIN_RATE_MASK) {
case MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5:
phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
break;
case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
phy->minimum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
break;
default:
break;
}
/*
* Set Min programmed link rate.
*/
switch (phy_info->programmed_link_rate &
MPI_SAS_PHY0_PRATE_MIN_RATE_MASK) {
case MPI_SAS_PHY0_PRATE_MIN_RATE_1_5:
phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
break;
case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
phy->minimum_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
default:
break;
}
if (!phy_info->phy) {
error = sas_phy_add(phy);
if (error) {
sas_phy_free(phy);
goto out;
}
phy_info->phy = phy;
}
if (!phy_info->attached.handle ||
!phy_info->port_details)
goto out;
port = mptsas_get_port(phy_info);
ioc = phy_to_ioc(phy_info->phy);
if (phy_info->sas_port_add_phy) {
if (!port) {
port = sas_port_alloc_num(dev);
if (!port) {
error = -ENOMEM;
goto out;
}
error = sas_port_add(port);
if (error) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__func__, __LINE__));
goto out;
}
mptsas_set_port(ioc, phy_info, port);
devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev,
MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n",
ioc->name, port->port_identifier,
(unsigned long long)phy_info->
attached.sas_address));
}
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"sas_port_add_phy: phy_id=%d\n",
ioc->name, phy_info->phy_id));
sas_port_add_phy(port, phy_info->phy);
phy_info->sas_port_add_phy = 0;
devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev,
MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name,
phy_info->phy_id, phy_info->phy));
}
if (!mptsas_get_rphy(phy_info) && port && !port->rphy) {
struct sas_rphy *rphy;
struct device *parent;
struct sas_identify identify;
parent = dev->parent->parent;
/*
* Let the hotplug_work thread handle processing
* the adding/removing of devices that occur
* after start of day.
*/
if (mptsas_is_end_device(&phy_info->attached) &&
phy_info->attached.handle_parent) {
goto out;
}
mptsas_parse_device_info(&identify, &phy_info->attached);
if (scsi_is_host_device(parent)) {
struct mptsas_portinfo *port_info;
int i;
port_info = ioc->hba_port_info;
for (i = 0; i < port_info->num_phys; i++)
if (port_info->phy_info[i].identify.sas_address ==
identify.sas_address) {
sas_port_mark_backlink(port);
goto out;
}
} else if (scsi_is_sas_rphy(parent)) {
struct sas_rphy *parent_rphy = dev_to_rphy(parent);
if (identify.sas_address ==
parent_rphy->identify.sas_address) {
sas_port_mark_backlink(port);
goto out;
}
}
switch (identify.device_type) {
case SAS_END_DEVICE:
rphy = sas_end_device_alloc(port);
break;
case SAS_EDGE_EXPANDER_DEVICE:
case SAS_FANOUT_EXPANDER_DEVICE:
rphy = sas_expander_alloc(port, identify.device_type);
break;
default:
rphy = NULL;
break;
}
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__func__, __LINE__));
goto out;
}
rphy->identify = identify;
error = sas_rphy_add(rphy);
if (error) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__func__, __LINE__));
sas_rphy_free(rphy);
goto out;
}
mptsas_set_rphy(ioc, phy_info, rphy);
if (identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
identify.device_type == SAS_FANOUT_EXPANDER_DEVICE)
mptsas_exp_repmanufacture_info(ioc,
identify.sas_address,
rphy_to_expander_device(rphy));
}
/* If the device exists,verify it wasn't previously flagged
as a missing device. If so, clear it */
vtarget = mptsas_find_vtarget(ioc,
phy_info->attached.channel,
phy_info->attached.id);
if (vtarget && vtarget->inDMD) {
printk(KERN_INFO "Device returned, unsetting inDMD\n");
vtarget->inDMD = 0;
}
out:
return error;
}
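/**
* mptsas_probe_hba_phys - discover and register the host adapter phys
* @ioc: Pointer to MPT_ADAPTER structure
*
* Reads SAS IO Unit pages 0 and 1, refreshes or installs the cached
* hba_port_info, reads phy and device page 0 for every phy, sets up
* wide ports and registers each phy with the sas transport layer.
**/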
static int
mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
{
struct mptsas_portinfo *port_info, *hba;
int error = -ENOMEM, i;
hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
if (! hba)
goto out;
error = mptsas_sas_io_unit_pg0(ioc, hba);
if (error)
goto out_free_port_info;
mptsas_sas_io_unit_pg1(ioc);
mutex_lock(&ioc->sas_topology_mutex);
port_info = ioc->hba_port_info;
if (!port_info) {
ioc->hba_port_info = port_info = hba;
ioc->hba_port_num_phy = port_info->num_phys;
list_add_tail(&port_info->list, &ioc->sas_topology);
} else {
for (i = 0; i < hba->num_phys; i++) {
port_info->phy_info[i].negotiated_link_rate =
hba->phy_info[i].negotiated_link_rate;
port_info->phy_info[i].handle =
hba->phy_info[i].handle;
port_info->phy_info[i].port_id =
hba->phy_info[i].port_id;
}
kfree(hba->phy_info);
kfree(hba);
hba = NULL;
}
mutex_unlock(&ioc->sas_topology_mutex);
#if defined(CPQ_CIM)
ioc->num_ports = port_info->num_phys;
#endif
for (i = 0; i < port_info->num_phys; i++) {
mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
(MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
port_info->phy_info[i].identify.handle =
port_info->phy_info[i].handle;
mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
port_info->phy_info[i].identify.handle);
if (!ioc->hba_port_sas_addr)
ioc->hba_port_sas_addr =
port_info->phy_info[i].identify.sas_address;
port_info->phy_info[i].identify.phy_id =
port_info->phy_info[i].phy_id = i;
if (port_info->phy_info[i].attached.handle)
mptsas_sas_device_pg0(ioc,
&port_info->phy_info[i].attached,
(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
port_info->phy_info[i].attached.handle);
}
mptsas_setup_wide_ports(ioc, port_info);
for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
mptsas_probe_one_phy(&ioc->sh->shost_gendev,
&port_info->phy_info[i], ioc->sas_index, 1);
return 0;
out_free_port_info:
kfree(hba);
out:
return error;
}
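/**
* mptsas_expander_refresh - refresh the phys of an expander
* @ioc: Pointer to MPT_ADAPTER structure
* @port_info: expander port_info object to refresh
*
* Re-reads expander page 1 and device page 0 for every phy, locates
* the parent rphy to use as the transport parent, sets up wide ports
* and probes each phy.
**/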
static void
mptsas_expander_refresh(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
{
struct mptsas_portinfo *parent;
struct device *parent_dev;
struct sas_rphy *rphy;
int i;
u64 sas_address; /* expander sas address */
u32 handle;
handle = port_info->phy_info[0].handle;
sas_address = port_info->phy_info[0].identify.sas_address;
for (i = 0; i < port_info->num_phys; i++) {
mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
(MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + handle);
mptsas_sas_device_pg0(ioc,
&port_info->phy_info[i].identify,
(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
port_info->phy_info[i].identify.handle);
port_info->phy_info[i].identify.phy_id =
port_info->phy_info[i].phy_id;
if (port_info->phy_info[i].attached.handle) {
mptsas_sas_device_pg0(ioc,
&port_info->phy_info[i].attached,
(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
port_info->phy_info[i].attached.handle);
port_info->phy_info[i].attached.phy_id =
port_info->phy_info[i].phy_id;
}
}
mutex_lock(&ioc->sas_topology_mutex);
parent = mptsas_find_portinfo_by_handle(ioc,
port_info->phy_info[0].identify.handle_parent);
if (!parent) {
mutex_unlock(&ioc->sas_topology_mutex);
return;
}
for (i = 0, parent_dev = NULL; i < parent->num_phys && !parent_dev;
i++) {
if (parent->phy_info[i].attached.sas_address == sas_address) {
rphy = mptsas_get_rphy(&parent->phy_info[i]);
parent_dev = &rphy->dev;
}
}
mutex_unlock(&ioc->sas_topology_mutex);
mptsas_setup_wide_ports(ioc, port_info);
for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
mptsas_probe_one_phy(parent_dev, &port_info->phy_info[i],
ioc->sas_index, 0);
}
static void
mptsas_expander_event_add(MPT_ADAPTER *ioc,
MpiEventDataSasExpanderStatusChange_t *expander_data)
{
struct mptsas_portinfo *port_info;
int i;
__le64 sas_address;
port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
if (!port_info)
BUG();
port_info->num_phys = (expander_data->NumPhys) ?
expander_data->NumPhys : 1;
port_info->phy_info = kcalloc(port_info->num_phys,
sizeof(struct mptsas_phyinfo), GFP_KERNEL);
if (!port_info->phy_info)
BUG();
memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
for (i = 0; i < port_info->num_phys; i++) {
port_info->phy_info[i].portinfo = port_info;
port_info->phy_info[i].handle =
le16_to_cpu(expander_data->DevHandle);
port_info->phy_info[i].identify.sas_address =
le64_to_cpu(sas_address);
port_info->phy_info[i].identify.handle_parent =
le16_to_cpu(expander_data->ParentDevHandle);
}
mutex_lock(&ioc->sas_topology_mutex);
list_add_tail(&port_info->list, &ioc->sas_topology);
mutex_unlock(&ioc->sas_topology_mutex);
printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
"sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
(unsigned long long)sas_address);
mptsas_expander_refresh(ioc, port_info);
}
/**
* mptsas_delete_expander_siblings - remove siblings attached to expander
* @ioc: Pointer to MPT_ADAPTER structure
* @parent: the parent port_info object
* @expander: the expander port_info object
**/
static void
mptsas_delete_expander_siblings(MPT_ADAPTER *ioc, struct mptsas_portinfo
*parent, struct mptsas_portinfo *expander)
{
struct mptsas_phyinfo *phy_info;
struct mptsas_portinfo *port_info;
struct sas_rphy *rphy;
int i;
phy_info = expander->phy_info;
for (i = 0; i < expander->num_phys; i++, phy_info++) {
rphy = mptsas_get_rphy(phy_info);
if (!rphy)
continue;
if (rphy->identify.device_type == SAS_END_DEVICE)
mptsas_del_end_device(ioc, phy_info);
}
phy_info = expander->phy_info;
for (i = 0; i < expander->num_phys; i++, phy_info++) {
rphy = mptsas_get_rphy(phy_info);
if (!rphy)
continue;
if (rphy->identify.device_type ==
MPI_SAS_DEVICE_INFO_EDGE_EXPANDER ||
rphy->identify.device_type ==
MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
port_info = mptsas_find_portinfo_by_sas_address(ioc,
rphy->identify.sas_address);
if (!port_info)
continue;
if (port_info == parent) /* backlink rphy */
continue;
/*
* Delete this expander even if its expander device page still
* exists, because the parent expander has already been deleted.
*/
mptsas_expander_delete(ioc, port_info, 1);
}
}
}
/**
* mptsas_expander_delete - remove this expander
* @ioc: Pointer to MPT_ADAPTER structure
* @port_info: expander port_info struct
* @force: Flag to forcefully delete the expander
*
**/
static void mptsas_expander_delete(MPT_ADAPTER *ioc,
struct mptsas_portinfo *port_info, u8 force)
{
struct mptsas_portinfo *parent;
int i;
u64 expander_sas_address;
struct mptsas_phyinfo *phy_info;
struct mptsas_portinfo buffer;
struct mptsas_portinfo_details *port_details;
struct sas_port *port;
if (!port_info)
return;
/* see if expander is still there before deleting */
mptsas_sas_expander_pg0(ioc, &buffer,
(MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
port_info->phy_info[0].identify.handle);
if (buffer.num_phys) {
kfree(buffer.phy_info);
if (!force)
return;
}
/*
* Obtain the port_info instance to the parent port
*/
port_details = NULL;
expander_sas_address =
port_info->phy_info[0].identify.sas_address;
parent = mptsas_find_portinfo_by_handle(ioc,
port_info->phy_info[0].identify.handle_parent);
mptsas_delete_expander_siblings(ioc, parent, port_info);
if (!parent)
goto out;
/*
* Delete rphys in the parent that point
* to this expander.
*/
phy_info = parent->phy_info;
port = NULL;
for (i = 0; i < parent->num_phys; i++, phy_info++) {
if (!phy_info->phy)
continue;
if (phy_info->attached.sas_address !=
expander_sas_address)
continue;
if (!port) {
port = mptsas_get_port(phy_info);
port_details = phy_info->port_details;
}
dev_printk(KERN_DEBUG, &phy_info->phy->dev,
MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", ioc->name,
phy_info->phy_id, phy_info->phy);
sas_port_delete_phy(port, phy_info->phy);
}
if (port) {
dev_printk(KERN_DEBUG, &port->dev,
MYIOC_s_FMT "delete port %d, sas_addr (0x%llx)\n",
ioc->name, port->port_identifier,
(unsigned long long)expander_sas_address);
sas_port_delete(port);
mptsas_port_delete(ioc, port_details);
}
out:
printk(MYIOC_s_INFO_FMT "delete expander: num_phys %d, "
"sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
(unsigned long long)expander_sas_address);
/*
* free link
*/
list_del(&port_info->list);
kfree(port_info->phy_info);
kfree(port_info);
}
/**
* mptsas_send_expander_event - handle expander status change events
* @fw_event: firmware event work object containing the expander event data
*
* This function handles adding, removing, and refreshing
* device handles within the expander objects.
*/
static void
mptsas_send_expander_event(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc;
MpiEventDataSasExpanderStatusChange_t *expander_data;
struct mptsas_portinfo *port_info;
__le64 sas_address;
int i;
ioc = fw_event->ioc;
expander_data = (MpiEventDataSasExpanderStatusChange_t *)
fw_event->event_data;
memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
sas_address = le64_to_cpu(sas_address);
port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
if (port_info) {
for (i = 0; i < port_info->num_phys; i++) {
port_info->phy_info[i].portinfo = port_info;
port_info->phy_info[i].handle =
le16_to_cpu(expander_data->DevHandle);
port_info->phy_info[i].identify.sas_address =
le64_to_cpu(sas_address);
port_info->phy_info[i].identify.handle_parent =
le16_to_cpu(expander_data->ParentDevHandle);
}
mptsas_expander_refresh(ioc, port_info);
} else if (!port_info && expander_data->NumPhys)
mptsas_expander_event_add(ioc, expander_data);
} else if (expander_data->ReasonCode ==
MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING)
mptsas_expander_delete(ioc, port_info, 0);
mptsas_free_fw_event(ioc, fw_event);
}
/**
* mptsas_expander_add - add an expander found at the given firmware handle
* @ioc: Pointer to MPT_ADAPTER structure
* @handle: firmware device handle of the expander
*
*/
struct mptsas_portinfo *
mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
{
struct mptsas_portinfo buffer, *port_info;
int i;
if ((mptsas_sas_expander_pg0(ioc, &buffer,
(MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
return NULL;
port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC);
if (!port_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__func__, __LINE__));
return NULL;
}
port_info->num_phys = buffer.num_phys;
port_info->phy_info = buffer.phy_info;
for (i = 0; i < port_info->num_phys; i++)
port_info->phy_info[i].portinfo = port_info;
mutex_lock(&ioc->sas_topology_mutex);
list_add_tail(&port_info->list, &ioc->sas_topology);
mutex_unlock(&ioc->sas_topology_mutex);
printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
"sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
(unsigned long long)buffer.phy_info[0].identify.sas_address);
mptsas_expander_refresh(ioc, port_info);
return port_info;
}
static void
mptsas_send_link_status_event(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc;
MpiEventDataSasPhyLinkStatus_t *link_data;
struct mptsas_portinfo *port_info;
struct mptsas_phyinfo *phy_info = NULL;
__le64 sas_address;
u8 phy_num;
u8 link_rate;
ioc = fw_event->ioc;
link_data = (MpiEventDataSasPhyLinkStatus_t *)fw_event->event_data;
memcpy(&sas_address, &link_data->SASAddress, sizeof(__le64));
sas_address = le64_to_cpu(sas_address);
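/* the upper nibble of LinkRates carries the current (negotiated) link rate */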
link_rate = link_data->LinkRates >> 4;
phy_num = link_data->PhyNum;
port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
if (port_info) {
phy_info = &port_info->phy_info[phy_num];
if (phy_info)
phy_info->negotiated_link_rate = link_rate;
}
if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
link_rate == MPI_SAS_IOUNIT0_RATE_3_0 ||
link_rate == MPI_SAS_IOUNIT0_RATE_6_0) {
if (!port_info) {
if (ioc->old_sas_discovery_protocal) {
port_info = mptsas_expander_add(ioc,
le16_to_cpu(link_data->DevHandle));
if (port_info)
goto out;
}
goto out;
}
if (port_info == ioc->hba_port_info)
mptsas_probe_hba_phys(ioc);
else
mptsas_expander_refresh(ioc, port_info);
} else if (phy_info && phy_info->phy) {
if (link_rate == MPI_SAS_IOUNIT0_RATE_PHY_DISABLED)
phy_info->phy->negotiated_linkrate =
SAS_PHY_DISABLED;
else if (link_rate ==
MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
phy_info->phy->negotiated_linkrate =
SAS_LINK_RATE_FAILED;
else {
phy_info->phy->negotiated_linkrate =
SAS_LINK_RATE_UNKNOWN;
if (ioc->device_missing_delay &&
mptsas_is_end_device(&phy_info->attached)) {
struct scsi_device *sdev;
VirtDevice *vdevice;
u8 channel, id;
id = phy_info->attached.id;
channel = phy_info->attached.channel;
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Link down for fw_id %d:fw_channel %d\n",
ioc->name, phy_info->attached.id,
phy_info->attached.channel));
shost_for_each_device(sdev, ioc->sh) {
vdevice = sdev->hostdata;
if ((vdevice == NULL) ||
(vdevice->vtarget == NULL))
continue;
if ((vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT ||
vdevice->vtarget->raidVolume))
continue;
if (vdevice->vtarget->id == id &&
vdevice->vtarget->channel ==
channel)
devtprintk(ioc,
printk(MYIOC_s_DEBUG_FMT
"SDEV OUTSTANDING CMDS"
"%d\n", ioc->name,
sdev->device_busy));
}
}
}
}
out:
mptsas_free_fw_event(ioc, fw_event);
}
static void
mptsas_not_responding_devices(MPT_ADAPTER *ioc)
{
struct mptsas_portinfo buffer, *port_info;
struct mptsas_device_info *sas_info;
struct mptsas_devinfo sas_device;
u32 handle;
VirtTarget *vtarget = NULL;
struct mptsas_phyinfo *phy_info;
u8 found_expander;
int retval, retry_count;
unsigned long flags;
mpt_findImVolumes(ioc);
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: exiting due to a parallel reset \n", ioc->name,
__func__));
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
return;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
/* devices, logical volumes */
mutex_lock(&ioc->sas_device_info_mutex);
redo_device_scan:
list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) {
if (sas_info->is_cached)
continue;
if (!sas_info->is_logical_volume) {
sas_device.handle = 0;
retry_count = 0;
retry_page:
retval = mptsas_sas_device_pg0(ioc, &sas_device,
(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID
<< MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
(sas_info->fw.channel << 8) +
sas_info->fw.id);
if (sas_device.handle)
continue;
if (retval == -EBUSY) {
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
dfailprintk(ioc,
printk(MYIOC_s_DEBUG_FMT
"%s: exiting due to reset\n",
ioc->name, __func__));
spin_unlock_irqrestore
(&ioc->taskmgmt_lock, flags);
mutex_unlock(&ioc->
sas_device_info_mutex);
return;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock,
flags);
}
if (retval && (retval != -ENODEV)) {
if (retry_count < 10) {
retry_count++;
goto retry_page;
} else {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: Config page retry exceeded retry "
"count deleting device 0x%llx\n",
ioc->name, __func__,
sas_info->sas_address));
}
}
/* delete device */
vtarget = mptsas_find_vtarget(ioc,
sas_info->fw.channel, sas_info->fw.id);
if (vtarget)
vtarget->deleted = 1;
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
sas_info->sas_address);
if (phy_info) {
mptsas_del_end_device(ioc, phy_info);
goto redo_device_scan;
}
} else
mptsas_volume_delete(ioc, sas_info->fw.id);
}
mutex_unlock(&ioc->sas_device_info_mutex);
/* expanders */
mutex_lock(&ioc->sas_topology_mutex);
redo_expander_scan:
list_for_each_entry(port_info, &ioc->sas_topology, list) {
if (port_info->phy_info &&
(!(port_info->phy_info[0].identify.device_info &
MPI_SAS_DEVICE_INFO_SMP_TARGET)))
continue;
found_expander = 0;
handle = 0xFFFF;
while (!mptsas_sas_expander_pg0(ioc, &buffer,
(MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle) &&
!found_expander) {
handle = buffer.phy_info[0].handle;
if (buffer.phy_info[0].identify.sas_address ==
port_info->phy_info[0].identify.sas_address) {
found_expander = 1;
}
kfree(buffer.phy_info);
}
if (!found_expander) {
mptsas_expander_delete(ioc, port_info, 0);
goto redo_expander_scan;
}
}
mutex_unlock(&ioc->sas_topology_mutex);
}
/**
* mptsas_probe_expanders - adding expanders
* @ioc: Pointer to MPT_ADAPTER structure
*
**/
static void
mptsas_probe_expanders(MPT_ADAPTER *ioc)
{
struct mptsas_portinfo buffer, *port_info;
u32 handle;
int i;
handle = 0xFFFF;
while (!mptsas_sas_expander_pg0(ioc, &buffer,
(MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) {
handle = buffer.phy_info[0].handle;
port_info = mptsas_find_portinfo_by_sas_address(ioc,
buffer.phy_info[0].identify.sas_address);
if (port_info) {
/* refreshing handles */
for (i = 0; i < buffer.num_phys; i++) {
port_info->phy_info[i].handle = handle;
port_info->phy_info[i].identify.handle_parent =
buffer.phy_info[0].identify.handle_parent;
}
mptsas_expander_refresh(ioc, port_info);
kfree(buffer.phy_info);
continue;
}
port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
if (!port_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__func__, __LINE__));
return;
}
port_info->num_phys = buffer.num_phys;
port_info->phy_info = buffer.phy_info;
for (i = 0; i < port_info->num_phys; i++)
port_info->phy_info[i].portinfo = port_info;
mutex_lock(&ioc->sas_topology_mutex);
list_add_tail(&port_info->list, &ioc->sas_topology);
mutex_unlock(&ioc->sas_topology_mutex);
printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
"sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
(unsigned long long)buffer.phy_info[0].identify.sas_address);
mptsas_expander_refresh(ioc, port_info);
}
}
static void
mptsas_probe_devices(MPT_ADAPTER *ioc)
{
u16 handle;
struct mptsas_devinfo sas_device;
struct mptsas_phyinfo *phy_info;
handle = 0xFFFF;
while (!(mptsas_sas_device_pg0(ioc, &sas_device,
MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
handle = sas_device.handle;
if ((sas_device.device_info &
(MPI_SAS_DEVICE_INFO_SSP_TARGET |
MPI_SAS_DEVICE_INFO_STP_TARGET |
MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
continue;
/* If there is no FW B_T mapping for this device then continue
* */
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|| !(sas_device.flags &
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
continue;
phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
if (!phy_info)
continue;
if (mptsas_get_rphy(phy_info))
continue;
mptsas_add_end_device(ioc, phy_info);
}
}
/**
* mptsas_scan_sas_topology - scan hba phys, expanders, devices and raid volumes
* @ioc: Pointer to MPT_ADAPTER structure
*
**/
static void
mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
{
struct scsi_device *sdev;
int i;
mptsas_probe_hba_phys(ioc);
mptsas_probe_expanders(ioc);
mptsas_probe_devices(ioc);
/*
Reporting RAID volumes.
*/
if (!ioc->ir_firmware || !ioc->raid_data.pIocPg2 ||
!ioc->raid_data.pIocPg2->NumActiveVolumes)
return;
for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
if (sdev) {
scsi_device_put(sdev);
continue;
}
printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
"id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID);
scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
}
}
static void
mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc;
EventDataQueueFull_t *qfull_data;
struct mptsas_device_info *sas_info;
struct scsi_device *sdev;
int depth;
int id = -1;
int channel = -1;
int fw_id, fw_channel;
u16 current_depth;
ioc = fw_event->ioc;
qfull_data = (EventDataQueueFull_t *)fw_event->event_data;
fw_id = qfull_data->TargetID;
fw_channel = qfull_data->Bus;
current_depth = le16_to_cpu(qfull_data->CurrentDepth);
/* if hidden raid component, look for the volume id */
mutex_lock(&ioc->sas_device_info_mutex);
if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) {
list_for_each_entry(sas_info, &ioc->sas_device_info_list,
list) {
if (sas_info->is_cached ||
sas_info->is_logical_volume)
continue;
if (sas_info->is_hidden_raid_component &&
(sas_info->fw.channel == fw_channel &&
sas_info->fw.id == fw_id)) {
id = sas_info->volume_id;
channel = MPTSAS_RAID_CHANNEL;
goto out;
}
}
} else {
list_for_each_entry(sas_info, &ioc->sas_device_info_list,
list) {
if (sas_info->is_cached ||
sas_info->is_hidden_raid_component ||
sas_info->is_logical_volume)
continue;
if (sas_info->fw.channel == fw_channel &&
sas_info->fw.id == fw_id) {
id = sas_info->os.id;
channel = sas_info->os.channel;
goto out;
}
}
}
out:
mutex_unlock(&ioc->sas_device_info_mutex);
if (id != -1) {
shost_for_each_device(sdev, ioc->sh) {
if (sdev->id == id && sdev->channel == channel) {
if (current_depth > sdev->queue_depth) {
sdev_printk(KERN_INFO, sdev,
"strange observation, the queue "
"depth is (%d) meanwhile fw queue "
"depth (%d)\n", sdev->queue_depth,
current_depth);
continue;
}
depth = scsi_track_queue_full(sdev,
current_depth - 1);
if (depth > 0)
sdev_printk(KERN_INFO, sdev,
"Queue depth reduced to (%d)\n",
depth);
else if (depth < 0)
sdev_printk(KERN_INFO, sdev,
"Tagged Command Queueing is being "
"disabled\n");
else if (depth == 0)
sdev_printk(KERN_INFO, sdev,
"Queue depth not changed yet\n");
}
}
}
mptsas_free_fw_event(ioc, fw_event);
}
static struct mptsas_phyinfo *
mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
{
struct mptsas_portinfo *port_info;
struct mptsas_phyinfo *phy_info = NULL;
int i;
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(port_info, &ioc->sas_topology, list) {
for (i = 0; i < port_info->num_phys; i++) {
if (!mptsas_is_end_device(
&port_info->phy_info[i].attached))
continue;
if (port_info->phy_info[i].attached.sas_address
!= sas_address)
continue;
phy_info = &port_info->phy_info[i];
break;
}
}
mutex_unlock(&ioc->sas_topology_mutex);
return phy_info;
}
/**
* mptsas_find_phyinfo_by_phys_disk_num - find phy_info for a raid physical disk
* @ioc: Pointer to MPT_ADAPTER structure
* @phys_disk_num: raid physical disk number
* @channel: firmware channel (bus) of the disk
* @id: firmware target id of the disk
*
**/
static struct mptsas_phyinfo *
mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
u8 channel, u8 id)
{
struct mptsas_phyinfo *phy_info = NULL;
struct mptsas_portinfo *port_info;
RaidPhysDiskPage1_t *phys_disk = NULL;
int num_paths;
u64 sas_address = 0;
int i;
phy_info = NULL;
if (!ioc->raid_data.pIocPg3)
return NULL;
/* dual port support */
num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
if (!num_paths)
goto out;
phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
(num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
if (!phys_disk)
goto out;
mpt_raid_phys_disk_pg1(ioc, phys_disk_num, phys_disk);
for (i = 0; i < num_paths; i++) {
if ((phys_disk->Path[i].Flags & 1) != 0)
/* entry no longer valid */
continue;
if ((id == phys_disk->Path[i].PhysDiskID) &&
(channel == phys_disk->Path[i].PhysDiskBus)) {
memcpy(&sas_address, &phys_disk->Path[i].WWID,
sizeof(u64));
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
sas_address);
goto out;
}
}
out:
kfree(phys_disk);
if (phy_info)
return phy_info;
/*
* Extra code to handle RAID0 case, where the sas_address is not updated
* in phys_disk_page_1 when hotswapped
*/
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry(port_info, &ioc->sas_topology, list) {
for (i = 0; i < port_info->num_phys && !phy_info; i++) {
if (!mptsas_is_end_device(
&port_info->phy_info[i].attached))
continue;
if (port_info->phy_info[i].attached.phys_disk_num == ~0)
continue;
if ((port_info->phy_info[i].attached.phys_disk_num ==
phys_disk_num) &&
(port_info->phy_info[i].attached.id == id) &&
(port_info->phy_info[i].attached.channel ==
channel))
phy_info = &port_info->phy_info[i];
}
}
mutex_unlock(&ioc->sas_topology_mutex);
return phy_info;
}
static void
mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
{
int rc;
sdev->no_uld_attach = data ? 1 : 0;
rc = scsi_device_reprobe(sdev);
}
static void
mptsas_reprobe_target(struct scsi_target *starget, int uld_attach)
{
starget_for_each_device(starget, uld_attach ? (void *)1 : NULL,
mptsas_reprobe_lun);
}
static void
mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
dma_addr_t dma_handle;
pRaidVolumePage0_t buffer = NULL;
RaidPhysDiskPage0_t phys_disk;
int i;
struct mptsas_phyinfo *phy_info;
struct mptsas_devinfo sas_device;
memset(&cfg, 0 , sizeof(CONFIGPARMS));
memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
cfg.pageAddr = (channel << 8) + id;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
if (!hdr.PageLength)
goto out;
buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
&dma_handle);
if (!buffer)
goto out;
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
if (!(buffer->VolumeStatus.Flags &
MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE))
goto out;
if (!buffer->NumPhysDisks)
goto out;
for (i = 0; i < buffer->NumPhysDisks; i++) {
if (mpt_raid_phys_disk_pg0(ioc,
buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
continue;
if (mptsas_sas_device_pg0(ioc, &sas_device,
(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
(phys_disk.PhysDiskBus << 8) +
phys_disk.PhysDiskID))
continue;
/* If there is no FW B_T mapping for this device then continue
* */
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|| !(sas_device.flags &
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
continue;
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
sas_device.sas_address);
mptsas_add_end_device(ioc, phy_info);
}
out:
if (buffer)
pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
dma_handle);
}
/*
* Work queue thread to handle SAS hotplug events
*/
static void
mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
struct mptsas_hotplug_event *hot_plug_info)
{
struct mptsas_phyinfo *phy_info;
struct scsi_target * starget;
struct mptsas_devinfo sas_device;
VirtTarget *vtarget;
int i;
struct mptsas_portinfo *port_info;
switch (hot_plug_info->event_type) {
case MPTSAS_ADD_PHYSDISK:
if (!ioc->raid_data.pIocPg2)
break;
for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
hot_plug_info->id) {
printk(MYIOC_s_WARN_FMT "firmware bug: unable "
"to add hidden disk - target_id matchs "
"volume_id\n", ioc->name);
mptsas_free_fw_event(ioc, fw_event);
return;
}
}
mpt_findImVolumes(ioc);
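/* fall through: a new physical disk is added like any other end device */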
case MPTSAS_ADD_DEVICE:
memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
mptsas_sas_device_pg0(ioc, &sas_device,
(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
(hot_plug_info->channel << 8) +
hot_plug_info->id);
/* If there is no FW B_T mapping for this device then break
* */
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|| !(sas_device.flags &
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
break;
if (!sas_device.handle)
return;
phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
/* Only For SATA Device ADD */
if (!phy_info && (sas_device.device_info &
MPI_SAS_DEVICE_INFO_SATA_DEVICE)) {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s %d SATA HOT PLUG: "
"parent handle of device %x\n", ioc->name,
__func__, __LINE__, sas_device.handle_parent));
port_info = mptsas_find_portinfo_by_handle(ioc,
sas_device.handle_parent);
if (port_info == ioc->hba_port_info)
mptsas_probe_hba_phys(ioc);
else if (port_info)
mptsas_expander_refresh(ioc, port_info);
else {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s %d port info is NULL\n",
ioc->name, __func__, __LINE__));
break;
}
phy_info = mptsas_refreshing_device_handles
(ioc, &sas_device);
}
if (!phy_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s %d phy info is NULL\n",
ioc->name, __func__, __LINE__));
break;
}
if (mptsas_get_rphy(phy_info))
break;
mptsas_add_end_device(ioc, phy_info);
break;
case MPTSAS_DEL_DEVICE:
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
hot_plug_info->sas_address);
mptsas_del_end_device(ioc, phy_info);
break;
case MPTSAS_DEL_PHYSDISK:
mpt_findImVolumes(ioc);
phy_info = mptsas_find_phyinfo_by_phys_disk_num(
ioc, hot_plug_info->phys_disk_num,
hot_plug_info->channel,
hot_plug_info->id);
mptsas_del_end_device(ioc, phy_info);
break;
case MPTSAS_ADD_PHYSDISK_REPROBE:
if (mptsas_sas_device_pg0(ioc, &sas_device,
(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
(hot_plug_info->channel << 8) + hot_plug_info->id)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
/* If there is no FW B_T mapping for this device then break
* */
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|| !(sas_device.flags &
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
break;
phy_info = mptsas_find_phyinfo_by_sas_address(
ioc, sas_device.sas_address);
if (!phy_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
starget = mptsas_get_starget(phy_info);
if (!starget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
vtarget = starget->hostdata;
if (!vtarget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
mpt_findImVolumes(ioc);
starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Hiding: "
"fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
ioc->name, hot_plug_info->channel, hot_plug_info->id,
hot_plug_info->phys_disk_num, (unsigned long long)
sas_device.sas_address);
vtarget->id = hot_plug_info->phys_disk_num;
vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
phy_info->attached.phys_disk_num = hot_plug_info->phys_disk_num;
mptsas_reprobe_target(starget, 1);
break;
case MPTSAS_DEL_PHYSDISK_REPROBE:
if (mptsas_sas_device_pg0(ioc, &sas_device,
(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
(hot_plug_info->channel << 8) + hot_plug_info->id)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n",
ioc->name, __func__,
hot_plug_info->id, __LINE__));
break;
}
/* If there is no FW B_T mapping for this device then break
* */
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|| !(sas_device.flags &
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
break;
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
sas_device.sas_address);
if (!phy_info) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
starget = mptsas_get_starget(phy_info);
if (!starget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
vtarget = starget->hostdata;
if (!vtarget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
if (!(vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: fw_id=%d exit at line=%d\n", ioc->name,
__func__, hot_plug_info->id, __LINE__));
break;
}
mpt_findImVolumes(ioc);
starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Exposing:"
" fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
ioc->name, hot_plug_info->channel, hot_plug_info->id,
hot_plug_info->phys_disk_num, (unsigned long long)
sas_device.sas_address);
vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
vtarget->id = hot_plug_info->id;
phy_info->attached.phys_disk_num = ~0;
mptsas_reprobe_target(starget, 0);
mptsas_add_device_component_by_fw(ioc,
hot_plug_info->channel, hot_plug_info->id);
break;
case MPTSAS_ADD_RAID:
mpt_findImVolumes(ioc);
printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
"id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
hot_plug_info->id);
scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
hot_plug_info->id, 0);
break;
case MPTSAS_DEL_RAID:
mpt_findImVolumes(ioc);
printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
"id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
hot_plug_info->id);
scsi_remove_device(hot_plug_info->sdev);
scsi_device_put(hot_plug_info->sdev);
break;
case MPTSAS_ADD_INACTIVE_VOLUME:
mpt_findImVolumes(ioc);
mptsas_adding_inactive_raid_components(ioc,
hot_plug_info->channel, hot_plug_info->id);
break;
default:
break;
}
mptsas_free_fw_event(ioc, fw_event);
}
static void
mptsas_send_sas_event(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc;
struct mptsas_hotplug_event hot_plug_info;
EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
u32 device_info;
u64 sas_address;
ioc = fw_event->ioc;
sas_event_data = (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)
fw_event->event_data;
device_info = le32_to_cpu(sas_event_data->DeviceInfo);
if ((device_info &
(MPI_SAS_DEVICE_INFO_SSP_TARGET |
MPI_SAS_DEVICE_INFO_STP_TARGET |
MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) {
mptsas_free_fw_event(ioc, fw_event);
return;
}
if (sas_event_data->ReasonCode ==
MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED) {
mptbase_sas_persist_operation(ioc,
MPI_SAS_OP_CLEAR_NOT_PRESENT);
mptsas_free_fw_event(ioc, fw_event);
return;
}
switch (sas_event_data->ReasonCode) {
case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
hot_plug_info.handle = le16_to_cpu(sas_event_data->DevHandle);
hot_plug_info.channel = sas_event_data->Bus;
hot_plug_info.id = sas_event_data->TargetID;
hot_plug_info.phy_id = sas_event_data->PhyNum;
memcpy(&sas_address, &sas_event_data->SASAddress,
sizeof(u64));
hot_plug_info.sas_address = le64_to_cpu(sas_address);
hot_plug_info.device_info = device_info;
if (sas_event_data->ReasonCode &
MPI_EVENT_SAS_DEV_STAT_RC_ADDED)
hot_plug_info.event_type = MPTSAS_ADD_DEVICE;
else
hot_plug_info.event_type = MPTSAS_DEL_DEVICE;
mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
mptbase_sas_persist_operation(ioc,
MPI_SAS_OP_CLEAR_NOT_PRESENT);
mptsas_free_fw_event(ioc, fw_event);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
/* TODO */
case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
/* TODO */
default:
mptsas_free_fw_event(ioc, fw_event);
break;
}
}
static void
mptsas_send_raid_event(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc;
EVENT_DATA_RAID *raid_event_data;
struct mptsas_hotplug_event hot_plug_info;
int status;
int state;
struct scsi_device *sdev = NULL;
VirtDevice *vdevice = NULL;
RaidPhysDiskPage0_t phys_disk;
ioc = fw_event->ioc;
raid_event_data = (EVENT_DATA_RAID *)fw_event->event_data;
status = le32_to_cpu(raid_event_data->SettingsStatus);
state = (status >> 8) & 0xff;
memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
hot_plug_info.id = raid_event_data->VolumeID;
hot_plug_info.channel = raid_event_data->VolumeBus;
hot_plug_info.phys_disk_num = raid_event_data->PhysDiskNum;
if (raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_DELETED ||
raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_CREATED ||
raid_event_data->ReasonCode ==
MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED) {
sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
hot_plug_info.id, 0);
hot_plug_info.sdev = sdev;
if (sdev)
vdevice = sdev->hostdata;
}
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
"ReasonCode=%02x\n", ioc->name, __func__,
raid_event_data->ReasonCode));
switch (raid_event_data->ReasonCode) {
case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK_REPROBE;
break;
case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK_REPROBE;
break;
case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
switch (state) {
case MPI_PD_STATE_ONLINE:
case MPI_PD_STATE_NOT_COMPATIBLE:
mpt_raid_phys_disk_pg0(ioc,
raid_event_data->PhysDiskNum, &phys_disk);
hot_plug_info.id = phys_disk.PhysDiskID;
hot_plug_info.channel = phys_disk.PhysDiskBus;
hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
break;
case MPI_PD_STATE_FAILED:
case MPI_PD_STATE_MISSING:
case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
break;
default:
break;
}
break;
case MPI_EVENT_RAID_RC_VOLUME_DELETED:
if (!sdev)
break;
vdevice->vtarget->deleted = 1; /* block IO */
hot_plug_info.event_type = MPTSAS_DEL_RAID;
break;
case MPI_EVENT_RAID_RC_VOLUME_CREATED:
if (sdev) {
scsi_device_put(sdev);
break;
}
hot_plug_info.event_type = MPTSAS_ADD_RAID;
break;
case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
if (!(status & MPI_RAIDVOL0_STATUS_FLAG_ENABLED)) {
if (!sdev)
break;
vdevice->vtarget->deleted = 1; /* block IO */
hot_plug_info.event_type = MPTSAS_DEL_RAID;
break;
}
switch (state) {
case MPI_RAIDVOL0_STATUS_STATE_FAILED:
case MPI_RAIDVOL0_STATUS_STATE_MISSING:
if (!sdev)
break;
vdevice->vtarget->deleted = 1; /* block IO */
hot_plug_info.event_type = MPTSAS_DEL_RAID;
break;
case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
if (sdev) {
scsi_device_put(sdev);
break;
}
hot_plug_info.event_type = MPTSAS_ADD_RAID;
break;
default:
break;
}
break;
default:
break;
}
if (hot_plug_info.event_type != MPTSAS_IGNORE_EVENT)
mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
else
mptsas_free_fw_event(ioc, fw_event);
}
/**
* mptsas_issue_tm - send mptsas internal tm request
* @ioc: Pointer to MPT_ADAPTER structure
* @type: Task Management type
* @channel: channel number for task management
* @id: Logical Target ID for reset (if appropriate)
* @lun: Logical unit for reset (if appropriate)
* @task_context: Context for the task to be aborted
* @timeout: timeout for task management control
*
* return 0 on success and -1 on failure:
*
*/
static int
mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
int task_context, ulong timeout, u8 *issue_reset)
{
MPT_FRAME_HDR *mf;
SCSITaskMgmt_t *pScsiTm;
int retval;
unsigned long timeleft;
*issue_reset = 0;
mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
if (mf == NULL) {
retval = -1; /* return failure */
dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no "
"msg frames!!\n", ioc->name));
goto out;
}
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request: mr = %p, "
"task_type = 0x%02X,\n\t timeout = %ld, fw_channel = %d, "
"fw_id = %d, lun = %lld,\n\t task_context = 0x%x\n", ioc->name, mf,
type, timeout, channel, id, (unsigned long long)lun,
task_context));
pScsiTm = (SCSITaskMgmt_t *) mf;
memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
pScsiTm->TaskType = type;
pScsiTm->MsgFlags = 0;
pScsiTm->TargetID = id;
pScsiTm->Bus = channel;
pScsiTm->ChainOffset = 0;
pScsiTm->Reserved = 0;
pScsiTm->Reserved1 = 0;
pScsiTm->TaskMsgContext = task_context;
int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
retval = 0;
mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
/* Now wait for the command to complete */
timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
timeout*HZ);
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
retval = -1; /* return failure */
dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
"TaskMgmt request: TIMED OUT!(mr=%p)\n", ioc->name, mf));
mpt_free_msg_frame(ioc, mf);
if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out;
*issue_reset = 1;
goto out;
}
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
retval = -1; /* return failure */
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt request: failed with no reply\n", ioc->name));
goto out;
}
out:
CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
return retval;
}
/**
* mptsas_broadcast_primative_work - handle broadcast primitives
* @fw_event: firmware event work object describing the broadcast event
*
* This is handled in workqueue context.
*/
static void
mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc = fw_event->ioc;
MPT_FRAME_HDR *mf;
VirtDevice *vdevice;
int ii;
struct scsi_cmnd *sc;
SCSITaskMgmtReply_t *pScsiTmReply;
u8 issue_reset;
int task_context;
u8 channel, id;
int lun;
u32 termination_count;
u32 query_count;
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s - enter\n", ioc->name, __func__));
mutex_lock(&ioc->taskmgmt_cmds.mutex);
if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
mptsas_requeue_fw_event(ioc, fw_event, 1000);
return;
}
issue_reset = 0;
termination_count = 0;
query_count = 0;
mpt_findImVolumes(ioc);
pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
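/*
* Walk every outstanding command: issue a QUERY_TASK for it and, if
* the reply does not show the task as succeeded or still queued on
* the IOC, follow up with an ABORT_TASK_SET for that device.
*/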
for (ii = 0; ii < ioc->req_depth; ii++) {
if (ioc->fw_events_off)
goto out;
sc = mptscsih_get_scsi_lookup(ioc, ii);
if (!sc)
continue;
mf = MPT_INDEX_2_MFPTR(ioc, ii);
if (!mf)
continue;
task_context = mf->u.frame.hwhdr.msgctxu.MsgContext;
vdevice = sc->device->hostdata;
if (!vdevice || !vdevice->vtarget)
continue;
if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
continue; /* skip hidden raid components */
if (vdevice->vtarget->raidVolume)
continue; /* skip hidden raid components */
channel = vdevice->vtarget->channel;
id = vdevice->vtarget->id;
lun = vdevice->lun;
if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK,
channel, id, (u64)lun, task_context, 30, &issue_reset))
goto out;
query_count++;
termination_count +=
le32_to_cpu(pScsiTmReply->TerminationCount);
if ((pScsiTmReply->IOCStatus == MPI_IOCSTATUS_SUCCESS) &&
(pScsiTmReply->ResponseCode ==
MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
pScsiTmReply->ResponseCode ==
MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
continue;
if (mptsas_issue_tm(ioc,
MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET,
channel, id, (u64)lun, 0, 30, &issue_reset))
goto out;
termination_count +=
le32_to_cpu(pScsiTmReply->TerminationCount);
}
out:
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s - exit, query_count = %d termination_count = %d\n",
ioc->name, __func__, query_count, termination_count));
ioc->broadcast_aen_busy = 0;
mpt_clear_taskmgmt_in_progress_flag(ioc);
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
if (issue_reset) {
printk(MYIOC_s_WARN_FMT
"Issuing Reset from %s!! doorbell=0x%08x\n",
ioc->name, __func__, mpt_GetIocState(ioc, 0));
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
}
mptsas_free_fw_event(ioc, fw_event);
}
/*
* mptsas_send_ir2_event - handle exposing hidden disk when
* an inactive raid volume is added
*
* @fw_event: firmware event work object containing the IR2 event data
*
*/
static void
mptsas_send_ir2_event(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc;
struct mptsas_hotplug_event hot_plug_info;
MPI_EVENT_DATA_IR2 *ir2_data;
u8 reasonCode;
RaidPhysDiskPage0_t phys_disk;
ioc = fw_event->ioc;
ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data;
reasonCode = ir2_data->ReasonCode;
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
"ReasonCode=%02x\n", ioc->name, __func__, reasonCode));
memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
hot_plug_info.id = ir2_data->TargetID;
hot_plug_info.channel = ir2_data->Bus;
switch (reasonCode) {
case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME;
break;
case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
break;
case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
mpt_raid_phys_disk_pg0(ioc,
ir2_data->PhysDiskNum, &phys_disk);
hot_plug_info.id = phys_disk.PhysDiskID;
hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
break;
default:
mptsas_free_fw_event(ioc, fw_event);
return;
}
mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
}
static int
mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
{
u32 event = le32_to_cpu(reply->Event);
int sz, event_data_sz;
struct fw_event_work *fw_event;
unsigned long delay;
if (ioc->bus_type != SAS)
return 0;
/* events turned off due to host reset or driver unloading */
if (ioc->fw_events_off)
return 0;
delay = msecs_to_jiffies(1);
switch (event) {
case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
{
EVENT_DATA_SAS_BROADCAST_PRIMITIVE *broadcast_event_data =
(EVENT_DATA_SAS_BROADCAST_PRIMITIVE *)reply->Data;
if (broadcast_event_data->Primitive !=
MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
return 0;
if (ioc->broadcast_aen_busy)
return 0;
ioc->broadcast_aen_busy = 1;
break;
}
case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
{
EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data =
(EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data;
u16 ioc_stat;
ioc_stat = le16_to_cpu(reply->IOCStatus);
if (sas_event_data->ReasonCode ==
MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) {
mptsas_target_reset_queue(ioc, sas_event_data);
return 0;
}
if (sas_event_data->ReasonCode ==
MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
ioc->device_missing_delay &&
(ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
VirtTarget *vtarget = NULL;
u8 id, channel;
id = sas_event_data->TargetID;
channel = sas_event_data->Bus;
vtarget = mptsas_find_vtarget(ioc, channel, id);
if (vtarget) {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"LogInfo (0x%x) available for "
"INTERNAL_DEVICE_RESET"
"fw_id %d fw_channel %d\n", ioc->name,
le32_to_cpu(reply->IOCLogInfo),
id, channel));
if (vtarget->raidVolume) {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Skipping Raid Volume for inDMD\n",
ioc->name));
} else {
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Setting device flag inDMD\n",
ioc->name));
vtarget->inDMD = 1;
}
}
}
break;
}
case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
{
MpiEventDataSasExpanderStatusChange_t *expander_data =
(MpiEventDataSasExpanderStatusChange_t *)reply->Data;
if (ioc->old_sas_discovery_protocal)
return 0;
if (expander_data->ReasonCode ==
MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING &&
ioc->device_missing_delay)
delay = HZ * ioc->device_missing_delay;
break;
}
case MPI_EVENT_SAS_DISCOVERY:
{
u32 discovery_status;
EventDataSasDiscovery_t *discovery_data =
(EventDataSasDiscovery_t *)reply->Data;
discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus);
ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0;
if (ioc->old_sas_discovery_protocal && !discovery_status)
mptsas_queue_rescan(ioc);
return 0;
}
case MPI_EVENT_INTEGRATED_RAID:
case MPI_EVENT_PERSISTENT_TABLE_FULL:
case MPI_EVENT_IR2:
case MPI_EVENT_SAS_PHY_LINK_STATUS:
case MPI_EVENT_QUEUE_FULL:
break;
default:
return 0;
}
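/*
* Queue the event for the firmware event workqueue: the fw_event
* work struct is allocated with enough trailing space to hold the
* variable-length event data copied from the reply frame.
*/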
event_data_sz = ((reply->MsgLength * 4) -
offsetof(EventNotificationReply_t, Data));
sz = offsetof(struct fw_event_work, event_data) + event_data_sz;
fw_event = kzalloc(sz, GFP_ATOMIC);
if (!fw_event) {
printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name,
__func__, __LINE__);
return 0;
}
memcpy(fw_event->event_data, reply->Data, event_data_sz);
fw_event->event = event;
fw_event->ioc = ioc;
mptsas_add_fw_event(ioc, fw_event, delay);
return 0;
}
/* Delete a volume when no longer listed in ioc pg2
*/
static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id)
{
struct scsi_device *sdev;
int i;
sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0);
if (!sdev)
return;
if (!ioc->raid_data.pIocPg2)
goto out;
if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
goto out;
for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id)
goto release_sdev;
out:
printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
"id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id);
scsi_remove_device(sdev);
release_sdev:
scsi_device_put(sdev);
}
static int
mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *sh;
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
unsigned long flags;
int ii;
int numSGE = 0;
int scale;
int ioc_cap;
int error=0;
int r;
r = mpt_attach(pdev,id);
if (r)
return r;
ioc = pci_get_drvdata(pdev);
mptsas_fw_event_off(ioc);
ioc->DoneCtx = mptsasDoneCtx;
ioc->TaskCtx = mptsasTaskCtx;
ioc->InternalCtx = mptsasInternalCtx;
ioc->schedule_target_reset = &mptsas_schedule_target_reset;
ioc->schedule_dead_ioc_flush_running_cmds =
&mptscsih_flush_running_cmds;
/* Added sanity check on readiness of the MPT adapter.
*/
if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
printk(MYIOC_s_WARN_FMT
"Skipping because it's not operational!\n",
ioc->name);
error = -ENODEV;
goto out_mptsas_probe;
}
if (!ioc->active) {
printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
ioc->name);
error = -ENODEV;
goto out_mptsas_probe;
}
/* Sanity check - ensure at least 1 port is INITIATOR capable
*/
ioc_cap = 0;
for (ii = 0; ii < ioc->facts.NumberOfPorts; ii++) {
if (ioc->pfacts[ii].ProtocolFlags &
MPI_PORTFACTS_PROTOCOL_INITIATOR)
ioc_cap++;
}
if (!ioc_cap) {
printk(MYIOC_s_WARN_FMT
"Skipping ioc=%p because SCSI Initiator mode "
"is NOT enabled!\n", ioc->name, ioc);
return 0;
}
sh = scsi_host_alloc(&mptsas_driver_template, sizeof(MPT_SCSI_HOST));
if (!sh) {
printk(MYIOC_s_WARN_FMT
"Unable to register controller with SCSI subsystem\n",
ioc->name);
error = -1;
goto out_mptsas_probe;
}
spin_lock_irqsave(&ioc->FreeQlock, flags);
/* Attach the SCSI Host to the IOC structure
*/
ioc->sh = sh;
sh->io_port = 0;
sh->n_io_port = 0;
sh->irq = 0;
/* set 16 byte cdb's */
sh->max_cmd_len = 16;
sh->can_queue = min_t(int, ioc->req_depth - 10, sh->can_queue);
sh->max_id = -1;
sh->max_lun = max_lun;
sh->transportt = mptsas_transport_template;
/* Required entry.
*/
sh->unique_id = ioc->id;
INIT_LIST_HEAD(&ioc->sas_topology);
mutex_init(&ioc->sas_topology_mutex);
mutex_init(&ioc->sas_discovery_mutex);
mutex_init(&ioc->sas_mgmt.mutex);
init_completion(&ioc->sas_mgmt.done);
/* Verify that we won't exceed the maximum
* number of chain buffers
* We can optimize: ZZ = req_sz/sizeof(SGE)
* For 32bit SGE's:
* numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
* + (req_sz - 64)/sizeof(SGE)
* A slightly different algorithm is required for
* 64bit SGEs.
*/
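/*
* Illustrative only, with hypothetical numbers: for a 128 byte
* request frame, 12 byte 64-bit SGEs and MaxChainDepth = 16,
* scale = 128/12 = 10 and numSGE = 9*15 + 10 + 68/12, i.e. about
* 150 scatter-gather entries per request.
*/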
scale = ioc->req_sz/ioc->SGE_size;
if (ioc->sg_addr_size == sizeof(u64)) {
numSGE = (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
(ioc->req_sz - 60) / ioc->SGE_size;
} else {
numSGE = 1 + (scale - 1) *
(ioc->facts.MaxChainDepth-1) + scale +
(ioc->req_sz - 64) / ioc->SGE_size;
}
if (numSGE < sh->sg_tablesize) {
/* Reset this value */
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Resetting sg_tablesize to %d from %d\n",
ioc->name, numSGE, sh->sg_tablesize));
sh->sg_tablesize = numSGE;
}
if (mpt_loadtime_max_sectors) {
if (mpt_loadtime_max_sectors < 64 ||
mpt_loadtime_max_sectors > 8192) {
printk(MYIOC_s_INFO_FMT "Invalid value passed for "
"mpt_loadtime_max_sectors %d. "
"Range from 64 to 8192\n", ioc->name,
mpt_loadtime_max_sectors);
}
mpt_loadtime_max_sectors &= 0xFFFFFFFE;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Resetting max sector to %d from %d\n",
ioc->name, mpt_loadtime_max_sectors, sh->max_sectors));
sh->max_sectors = mpt_loadtime_max_sectors;
}
hd = shost_priv(sh);
hd->ioc = ioc;
/* SCSI needs scsi_cmnd lookup table!
* (with size equal to req_depth*PtrSz!)
*/
ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC);
if (!ioc->ScsiLookup) {
error = -ENOMEM;
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
goto out_mptsas_probe;
}
spin_lock_init(&ioc->scsi_lookup_lock);
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
ioc->name, ioc->ScsiLookup));
ioc->sas_data.ptClear = mpt_pt_clear;
hd->last_queue_full = 0;
INIT_LIST_HEAD(&hd->target_reset_list);
INIT_LIST_HEAD(&ioc->sas_device_info_list);
mutex_init(&ioc->sas_device_info_mutex);
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
if (ioc->sas_data.ptClear==1) {
mptbase_sas_persist_operation(
ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT);
}
error = scsi_add_host(sh, &ioc->pcidev->dev);
if (error) {
dprintk(ioc, printk(MYIOC_s_ERR_FMT
"scsi_add_host failed\n", ioc->name));
goto out_mptsas_probe;
}
/* older firmware doesn't support expander events */
if ((ioc->facts.HeaderVersion >> 8) < 0xE)
ioc->old_sas_discovery_protocal = 1;
mptsas_scan_sas_topology(ioc);
mptsas_fw_event_on(ioc);
return 0;
out_mptsas_probe:
mptscsih_remove(pdev);
return error;
}
void
mptsas_shutdown(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
mptsas_fw_event_off(ioc);
mptsas_cleanup_fw_event_q(ioc);
}
static void __devexit mptsas_remove(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct mptsas_portinfo *p, *n;
int i;
if (!ioc->sh) {
printk(MYIOC_s_INFO_FMT "IOC is in Target mode\n", ioc->name);
mpt_detach(pdev);
return;
}
mptsas_shutdown(pdev);
mptsas_del_device_components(ioc);
ioc->sas_discovery_ignore_events = 1;
sas_remove_host(ioc->sh);
mutex_lock(&ioc->sas_topology_mutex);
list_for_each_entry_safe(p, n, &ioc->sas_topology, list) {
list_del(&p->list);
for (i = 0 ; i < p->num_phys ; i++)
mptsas_port_delete(ioc, p->phy_info[i].port_details);
kfree(p->phy_info);
kfree(p);
}
mutex_unlock(&ioc->sas_topology_mutex);
ioc->hba_port_info = NULL;
mptscsih_remove(pdev);
}
static struct pci_device_id mptsas_pci_table[] = {
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1064,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1064E,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068E,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1078,
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP,
PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mptsas_pci_table);
static struct pci_driver mptsas_driver = {
.name = "mptsas",
.id_table = mptsas_pci_table,
.probe = mptsas_probe,
.remove = __devexit_p(mptsas_remove),
.shutdown = mptsas_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
.resume = mptscsih_resume,
#endif
};
static int __init
mptsas_init(void)
{
int error;
show_mptmod_ver(my_NAME, my_VERSION);
mptsas_transport_template =
sas_attach_transport(&mptsas_transport_functions);
if (!mptsas_transport_template)
return -ENODEV;
mptsas_transport_template->eh_timed_out = mptsas_eh_timed_out;
mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER,
"mptscsih_io_done");
mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER,
"mptscsih_taskmgmt_complete");
mptsasInternalCtx =
mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER,
"mptscsih_scandv_complete");
mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER,
"mptsas_mgmt_done");
mptsasDeviceResetCtx =
mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER,
"mptsas_taskmgmt_complete");
mpt_event_register(mptsasDoneCtx, mptsas_event_process);
mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset);
error = pci_register_driver(&mptsas_driver);
if (error)
sas_release_transport(mptsas_transport_template);
return error;
}
static void __exit
mptsas_exit(void)
{
pci_unregister_driver(&mptsas_driver);
sas_release_transport(mptsas_transport_template);
mpt_reset_deregister(mptsasDoneCtx);
mpt_event_deregister(mptsasDoneCtx);
mpt_deregister(mptsasMgmtCtx);
mpt_deregister(mptsasInternalCtx);
mpt_deregister(mptsasTaskCtx);
mpt_deregister(mptsasDoneCtx);
mpt_deregister(mptsasDeviceResetCtx);
}
module_init(mptsas_init);
module_exit(mptsas_exit);
| gpl-2.0 |
AD5GB/google_kernel_hammerhead | sound/soc/blackfin/bf5xx-ac97-pcm.c | 5075 | 14213 | /*
* File: sound/soc/blackfin/bf5xx-ac97-pcm.c
* Author: Cliff Cai <Cliff.Cai@analog.com>
*
* Created: Tue June 06 2008
* Description: DMA Driver for AC97 sound chip
*
* Modified:
* Copyright 2008 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <asm/dma.h>
#include "bf5xx-ac97-pcm.h"
#include "bf5xx-ac97.h"
#include "bf5xx-sport.h"
static unsigned int ac97_chan_mask[] = {
SP_FL, /* Mono */
SP_STEREO, /* Stereo */
SP_2DOT1, /* 2.1 */
SP_QUAD, /* Quadraphonic */
SP_FL | SP_FR | SP_FC | SP_SL | SP_SR, /* 5 channels */
SP_5DOT1, /* 5.1 */
};
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
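/*
* Copy one period between the mmap'ed PCM buffer and the intermediate
* AC97 frame DMA buffer: PCM samples are packed into AC97 slots for
* playback and unpacked from them for capture, with the ring position
* advanced by one period per call.
*/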
static void bf5xx_mmap_copy(struct snd_pcm_substream *substream,
snd_pcm_uframes_t count)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sport_device *sport = runtime->private_data;
unsigned int chan_mask = ac97_chan_mask[runtime->channels - 1];
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
bf5xx_pcm_to_ac97((struct ac97_frame *)sport->tx_dma_buf +
sport->tx_pos, (__u16 *)runtime->dma_area + sport->tx_pos *
runtime->channels, count, chan_mask);
sport->tx_pos += runtime->period_size;
if (sport->tx_pos >= runtime->buffer_size)
sport->tx_pos %= runtime->buffer_size;
sport->tx_delay_pos = sport->tx_pos;
} else {
bf5xx_ac97_to_pcm((struct ac97_frame *)sport->rx_dma_buf +
sport->rx_pos, (__u16 *)runtime->dma_area + sport->rx_pos *
runtime->channels, count);
sport->rx_pos += runtime->period_size;
if (sport->rx_pos >= runtime->buffer_size)
sport->rx_pos %= runtime->buffer_size;
}
}
#endif
static void bf5xx_dma_irq(void *data)
{
struct snd_pcm_substream *pcm = data;
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
struct snd_pcm_runtime *runtime = pcm->runtime;
struct sport_device *sport = runtime->private_data;
bf5xx_mmap_copy(pcm, runtime->period_size);
if (pcm->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (sport->once == 0) {
snd_pcm_period_elapsed(pcm);
bf5xx_mmap_copy(pcm, runtime->period_size);
sport->once = 1;
}
}
#endif
snd_pcm_period_elapsed(pcm);
}
/* The memory size for pure pcm data is 128*1024 = 0x20000 bytes.
* The total rx/tx DMA buffer needed to hold the same pcm data as
* ac97 frames is 0x20000 * sizeof(struct ac97_frame) / 4.
*/
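/*
* Reading the math above (assuming 16-bit stereo samples): 0x20000
* bytes of pcm data is 0x20000 / 4 = 0x8000 frames, and each frame
* occupies one struct ac97_frame slot in the DMA buffer.
*/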
static const struct snd_pcm_hardware bf5xx_pcm_hardware = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
#endif
SNDRV_PCM_INFO_BLOCK_TRANSFER,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.period_bytes_min = 32,
.period_bytes_max = 0x10000,
.periods_min = 1,
.periods_max = PAGE_SIZE/32,
.buffer_bytes_max = 0x20000, /* 128 kbytes */
.fifo_size = 16,
};
static int bf5xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
size_t size = bf5xx_pcm_hardware.buffer_bytes_max
* sizeof(struct ac97_frame) / 4;
snd_pcm_lib_malloc_pages(substream, size);
return 0;
}
static int bf5xx_pcm_hw_free(struct snd_pcm_substream *substream)
{
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
struct snd_pcm_runtime *runtime = substream->runtime;
struct sport_device *sport = runtime->private_data;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
sport->once = 0;
if (runtime->dma_area)
memset(runtime->dma_area, 0, runtime->buffer_size);
memset(sport->tx_dma_buf, 0, runtime->buffer_size *
sizeof(struct ac97_frame));
} else
memset(sport->rx_dma_buf, 0, runtime->buffer_size *
sizeof(struct ac97_frame));
#endif
snd_pcm_lib_free_pages(substream);
return 0;
}
static int bf5xx_pcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sport_device *sport = runtime->private_data;
/* An intermediate buffer is introduced for implementing mmap for
* SPORT working in TMD mode (including AC97).
*/
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
sport_set_tx_callback(sport, bf5xx_dma_irq, substream);
sport_config_tx_dma(sport, sport->tx_dma_buf, runtime->periods,
runtime->period_size * sizeof(struct ac97_frame));
} else {
sport_set_rx_callback(sport, bf5xx_dma_irq, substream);
sport_config_rx_dma(sport, sport->rx_dma_buf, runtime->periods,
runtime->period_size * sizeof(struct ac97_frame));
}
#else
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
sport_set_tx_callback(sport, bf5xx_dma_irq, substream);
sport_config_tx_dma(sport, runtime->dma_area, runtime->periods,
runtime->period_size * sizeof(struct ac97_frame));
} else {
sport_set_rx_callback(sport, bf5xx_dma_irq, substream);
sport_config_rx_dma(sport, runtime->dma_area, runtime->periods,
runtime->period_size * sizeof(struct ac97_frame));
}
#endif
return 0;
}
static int bf5xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sport_device *sport = runtime->private_data;
int ret = 0;
pr_debug("%s enter\n", __func__);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
bf5xx_mmap_copy(substream, runtime->period_size);
sport->tx_delay_pos = 0;
#endif
sport_tx_start(sport);
} else
sport_rx_start(sport);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
sport->tx_pos = 0;
#endif
sport_tx_stop(sport);
} else {
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
sport->rx_pos = 0;
#endif
sport_rx_stop(sport);
}
break;
default:
ret = -EINVAL;
}
return ret;
}
static snd_pcm_uframes_t bf5xx_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sport_device *sport = runtime->private_data;
unsigned int curr;
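/*
* In mmap mode the pointer follows the software copy position kept by
* bf5xx_mmap_copy(); otherwise it is derived from the current DMA
* offset, converted from bytes to AC97 frames.
*/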
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
curr = sport->tx_delay_pos;
else
curr = sport->rx_pos;
#else
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
curr = sport_curr_offset_tx(sport) / sizeof(struct ac97_frame);
else
curr = sport_curr_offset_rx(sport) / sizeof(struct ac97_frame);
#endif
return curr;
}
static int bf5xx_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct sport_device *sport_handle = snd_soc_dai_get_drvdata(cpu_dai);
struct snd_pcm_runtime *runtime = substream->runtime;
int ret;
pr_debug("%s enter\n", __func__);
snd_soc_set_runtime_hwparams(substream, &bf5xx_pcm_hardware);
ret = snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
if (ret < 0)
goto out;
if (sport_handle != NULL)
runtime->private_data = sport_handle;
else {
pr_err("sport_handle is NULL\n");
return -1;
}
return 0;
out:
return ret;
}
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
static int bf5xx_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
size_t size = vma->vm_end - vma->vm_start;
vma->vm_start = (unsigned long)runtime->dma_area;
vma->vm_end = vma->vm_start + size;
vma->vm_flags |= VM_SHARED;
return 0 ;
}
#else
static int bf5xx_pcm_copy(struct snd_pcm_substream *substream, int channel,
snd_pcm_uframes_t pos,
void __user *buf, snd_pcm_uframes_t count)
{
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int chan_mask = ac97_chan_mask[runtime->channels - 1];
pr_debug("%s copy pos:0x%lx count:0x%lx\n",
substream->stream ? "Capture" : "Playback", pos, count);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
bf5xx_pcm_to_ac97((struct ac97_frame *)runtime->dma_area + pos,
(__u16 *)buf, count, chan_mask);
else
bf5xx_ac97_to_pcm((struct ac97_frame *)runtime->dma_area + pos,
(__u16 *)buf, count);
return 0;
}
#endif
static struct snd_pcm_ops bf5xx_pcm_ac97_ops = {
.open = bf5xx_pcm_open,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = bf5xx_pcm_hw_params,
.hw_free = bf5xx_pcm_hw_free,
.prepare = bf5xx_pcm_prepare,
.trigger = bf5xx_pcm_trigger,
.pointer = bf5xx_pcm_pointer,
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
.mmap = bf5xx_pcm_mmap,
#else
.copy = bf5xx_pcm_copy,
#endif
};
static int bf5xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
struct snd_soc_pcm_runtime *rtd = pcm->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct sport_device *sport_handle = snd_soc_dai_get_drvdata(cpu_dai);
struct snd_pcm_substream *substream = pcm->streams[stream].substream;
struct snd_dma_buffer *buf = &substream->dma_buffer;
size_t size = bf5xx_pcm_hardware.buffer_bytes_max
* sizeof(struct ac97_frame) / 4;
buf->dev.type = SNDRV_DMA_TYPE_DEV;
buf->dev.dev = pcm->card->dev;
buf->private_data = NULL;
buf->area = dma_alloc_coherent(pcm->card->dev, size,
&buf->addr, GFP_KERNEL);
if (!buf->area) {
pr_err("Failed to allocate dma memory\n");
pr_err("Please increase uncached DMA memory region\n");
return -ENOMEM;
}
buf->bytes = size;
pr_debug("%s, area:%p, size:0x%08lx\n", __func__,
buf->area, buf->bytes);
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
sport_handle->tx_buf = buf->area;
else
sport_handle->rx_buf = buf->area;
/*
* Need to allocate local buffer when enable
* MMAP for SPORT working in TMD mode (include AC97).
*/
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (!sport_handle->tx_dma_buf) {
sport_handle->tx_dma_buf = dma_alloc_coherent(NULL, \
size, &sport_handle->tx_dma_phy, GFP_KERNEL);
if (!sport_handle->tx_dma_buf) {
pr_err("Failed to allocate memory for tx dma buf - Please increase uncached DMA memory region\n");
return -ENOMEM;
} else
memset(sport_handle->tx_dma_buf, 0, size);
} else
memset(sport_handle->tx_dma_buf, 0, size);
} else {
if (!sport_handle->rx_dma_buf) {
sport_handle->rx_dma_buf = dma_alloc_coherent(NULL, \
size, &sport_handle->rx_dma_phy, GFP_KERNEL);
if (!sport_handle->rx_dma_buf) {
pr_err("Failed to allocate memory for rx dma buf - Please increase uncached DMA memory region\n");
return -ENOMEM;
} else
memset(sport_handle->rx_dma_buf, 0, size);
} else
memset(sport_handle->rx_dma_buf, 0, size);
}
#endif
return 0;
}
static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
int stream;
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
struct snd_soc_pcm_runtime *rtd = pcm->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct sport_device *sport_handle = snd_soc_dai_get_drvdata(cpu_dai);
size_t size = bf5xx_pcm_hardware.buffer_bytes_max *
sizeof(struct ac97_frame) / 4;
#endif
for (stream = 0; stream < 2; stream++) {
substream = pcm->streams[stream].substream;
if (!substream)
continue;
buf = &substream->dma_buffer;
if (!buf->area)
continue;
dma_free_coherent(NULL, buf->bytes, buf->area, 0);
buf->area = NULL;
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (sport_handle->tx_dma_buf)
dma_free_coherent(NULL, size, \
sport_handle->tx_dma_buf, 0);
sport_handle->tx_dma_buf = NULL;
} else {
if (sport_handle->rx_dma_buf)
dma_free_coherent(NULL, size, \
sport_handle->rx_dma_buf, 0);
sport_handle->rx_dma_buf = NULL;
}
#endif
}
}
static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
static int bf5xx_pcm_ac97_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
pr_debug("%s enter\n", __func__);
if (!card->dev->dma_mask)
card->dev->dma_mask = &bf5xx_pcm_dmamask;
if (!card->dev->coherent_dma_mask)
card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_PLAYBACK);
if (ret)
goto out;
}
if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_CAPTURE);
if (ret)
goto out;
}
out:
return ret;
}
static struct snd_soc_platform_driver bf5xx_ac97_soc_platform = {
.ops = &bf5xx_pcm_ac97_ops,
.pcm_new = bf5xx_pcm_ac97_new,
.pcm_free = bf5xx_pcm_free_dma_buffers,
};
static int __devinit bf5xx_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &bf5xx_ac97_soc_platform);
}
static int __devexit bf5xx_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
}
static struct platform_driver bf5xx_pcm_driver = {
.driver = {
.name = "bfin-ac97-pcm-audio",
.owner = THIS_MODULE,
},
.probe = bf5xx_soc_platform_probe,
.remove = __devexit_p(bf5xx_soc_platform_remove),
};
module_platform_driver(bf5xx_pcm_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("ADI Blackfin AC97 PCM DMA module");
MODULE_LICENSE("GPL");
| gpl-2.0 |
littlelerroyy/android_kernel_htc_pyramid | drivers/gpio/gpio-vx855.c | 5075 | 8475 | /*
* Linux GPIOlib driver for the VIA VX855 integrated southbridge GPIO
*
* Copyright (C) 2009 VIA Technologies, Inc.
* Copyright (C) 2010 One Laptop per Child
* Author: Harald Welte <HaraldWelte@viatech.com>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/io.h>
#define MODULE_NAME "vx855_gpio"
/* The VX855 south bridge has the following GPIO pins:
* GPI 0...13 General Purpose Input
* GPO 0...12 General Purpose Output
* GPIO 0...14 General Purpose I/O (Open-Drain)
*/
#define NR_VX855_GPI 14
#define NR_VX855_GPO 13
#define NR_VX855_GPIO 15
#define NR_VX855_GPInO (NR_VX855_GPI + NR_VX855_GPO)
#define NR_VX855_GP (NR_VX855_GPI + NR_VX855_GPO + NR_VX855_GPIO)
struct vx855_gpio {
struct gpio_chip gpio;
spinlock_t lock;
u32 io_gpi;
u32 io_gpo;
bool gpi_reserved;
bool gpo_reserved;
};
/* resolve a GPIx into the corresponding bit position */
static inline u_int32_t gpi_i_bit(int i)
{
if (i < 10)
return 1 << i;
else
return 1 << (i + 14);
}
static inline u_int32_t gpo_o_bit(int i)
{
if (i < 11)
return 1 << i;
else
return 1 << (i + 14);
}
static inline u_int32_t gpio_i_bit(int i)
{
if (i < 14)
return 1 << (i + 10);
else
return 1 << (i + 14);
}
static inline u_int32_t gpio_o_bit(int i)
{
if (i < 14)
return 1 << (i + 11);
else
return 1 << (i + 13);
}
/* Mapping between numeric GPIO ID and the actual GPIO hardware numbering:
* 0..13 GPI 0..13
* 14..26 GPO 0..12
* 27..41 GPIO 0..14
*/
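/*
 * Illustrative sketch, not part of the original driver: a hypothetical
 * helper applying the mapping above, classifying a chip-relative GPIO
 * number into its hardware group and index (kept under #if 0,
 * documentation only).
 */
#if 0
enum vx855_gp_type { VX855_TYPE_GPI, VX855_TYPE_GPO, VX855_TYPE_GPIO };

static enum vx855_gp_type vx855_classify(unsigned int nr, unsigned int *index)
{
	if (nr < NR_VX855_GPI) {
		*index = nr;			/* GPI 0..13 */
		return VX855_TYPE_GPI;
	}
	if (nr < NR_VX855_GPInO) {
		*index = nr - NR_VX855_GPI;	/* GPO 0..12 */
		return VX855_TYPE_GPO;
	}
	*index = nr - NR_VX855_GPInO;		/* GPIO 0..14 */
	return VX855_TYPE_GPIO;
}
#endif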
static int vx855gpio_direction_input(struct gpio_chip *gpio,
unsigned int nr)
{
struct vx855_gpio *vg = container_of(gpio, struct vx855_gpio, gpio);
unsigned long flags;
u_int32_t reg_out;
/* Real GPI bits are always in input direction */
if (nr < NR_VX855_GPI)
return 0;
/* Real GPO bits cannot be put in output direction */
if (nr < NR_VX855_GPInO)
return -EINVAL;
/* Open Drain GPIO have to be set to one */
spin_lock_irqsave(&vg->lock, flags);
reg_out = inl(vg->io_gpo);
reg_out |= gpio_o_bit(nr - NR_VX855_GPInO);
outl(reg_out, vg->io_gpo);
spin_unlock_irqrestore(&vg->lock, flags);
return 0;
}
static int vx855gpio_get(struct gpio_chip *gpio, unsigned int nr)
{
struct vx855_gpio *vg = container_of(gpio, struct vx855_gpio, gpio);
u_int32_t reg_in;
int ret = 0;
if (nr < NR_VX855_GPI) {
reg_in = inl(vg->io_gpi);
if (reg_in & gpi_i_bit(nr))
ret = 1;
} else if (nr < NR_VX855_GPInO) {
/* GPO don't have an input bit, we need to read it
* back from the output register */
reg_in = inl(vg->io_gpo);
if (reg_in & gpo_o_bit(nr - NR_VX855_GPI))
ret = 1;
} else {
reg_in = inl(vg->io_gpi);
if (reg_in & gpio_i_bit(nr - NR_VX855_GPInO))
ret = 1;
}
return ret;
}
static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr,
int val)
{
struct vx855_gpio *vg = container_of(gpio, struct vx855_gpio, gpio);
unsigned long flags;
u_int32_t reg_out;
/* True GPI cannot be switched to output mode */
if (nr < NR_VX855_GPI)
return;
spin_lock_irqsave(&vg->lock, flags);
reg_out = inl(vg->io_gpo);
if (nr < NR_VX855_GPInO) {
if (val)
reg_out |= gpo_o_bit(nr - NR_VX855_GPI);
else
reg_out &= ~gpo_o_bit(nr - NR_VX855_GPI);
} else {
if (val)
reg_out |= gpio_o_bit(nr - NR_VX855_GPInO);
else
reg_out &= ~gpio_o_bit(nr - NR_VX855_GPInO);
}
outl(reg_out, vg->io_gpo);
spin_unlock_irqrestore(&vg->lock, flags);
}
static int vx855gpio_direction_output(struct gpio_chip *gpio,
unsigned int nr, int val)
{
/* True GPI cannot be switched to output mode */
if (nr < NR_VX855_GPI)
return -EINVAL;
/* True GPO don't need to be switched to output mode,
* and GPIO are open-drain, i.e. also need no switching,
* so all we do is set the level */
vx855gpio_set(gpio, nr, val);
return 0;
}
static const char *vx855gpio_names[NR_VX855_GP] = {
"VX855_GPI0", "VX855_GPI1", "VX855_GPI2", "VX855_GPI3", "VX855_GPI4",
"VX855_GPI5", "VX855_GPI6", "VX855_GPI7", "VX855_GPI8", "VX855_GPI9",
"VX855_GPI10", "VX855_GPI11", "VX855_GPI12", "VX855_GPI13",
"VX855_GPO0", "VX855_GPO1", "VX855_GPO2", "VX855_GPO3", "VX855_GPO4",
"VX855_GPO5", "VX855_GPO6", "VX855_GPO7", "VX855_GPO8", "VX855_GPO9",
"VX855_GPO10", "VX855_GPO11", "VX855_GPO12",
"VX855_GPIO0", "VX855_GPIO1", "VX855_GPIO2", "VX855_GPIO3",
"VX855_GPIO4", "VX855_GPIO5", "VX855_GPIO6", "VX855_GPIO7",
"VX855_GPIO8", "VX855_GPIO9", "VX855_GPIO10", "VX855_GPIO11",
"VX855_GPIO12", "VX855_GPIO13", "VX855_GPIO14"
};
static void vx855gpio_gpio_setup(struct vx855_gpio *vg)
{
struct gpio_chip *c = &vg->gpio;
c->label = "VX855 South Bridge";
c->owner = THIS_MODULE;
c->direction_input = vx855gpio_direction_input;
c->direction_output = vx855gpio_direction_output;
c->get = vx855gpio_get;
c->set = vx855gpio_set;
c->dbg_show = NULL;
c->base = 0;
c->ngpio = NR_VX855_GP;
c->can_sleep = 0;
c->names = vx855gpio_names;
}
/* This platform device is ordinarily registered by the vx855 mfd driver */
static __devinit int vx855gpio_probe(struct platform_device *pdev)
{
struct resource *res_gpi;
struct resource *res_gpo;
struct vx855_gpio *vg;
int ret;
res_gpi = platform_get_resource(pdev, IORESOURCE_IO, 0);
res_gpo = platform_get_resource(pdev, IORESOURCE_IO, 1);
if (!res_gpi || !res_gpo)
return -EBUSY;
vg = kzalloc(sizeof(*vg), GFP_KERNEL);
if (!vg)
return -ENOMEM;
platform_set_drvdata(pdev, vg);
dev_info(&pdev->dev, "found VX855 GPIO controller\n");
vg->io_gpi = res_gpi->start;
vg->io_gpo = res_gpo->start;
spin_lock_init(&vg->lock);
/*
* A single byte is used to control various GPIO ports on the VX855,
* and in the case of the OLPC XO-1.5, some of those ports are used
* for switches that are interpreted and exposed through ACPI. ACPI
* will have reserved the region, so our own reservation will not
* succeed. Ignore and continue.
*/
if (!request_region(res_gpi->start, resource_size(res_gpi),
MODULE_NAME "_gpi"))
dev_warn(&pdev->dev,
"GPI I/O resource busy, probably claimed by ACPI\n");
else
vg->gpi_reserved = true;
if (!request_region(res_gpo->start, resource_size(res_gpo),
MODULE_NAME "_gpo"))
dev_warn(&pdev->dev,
"GPO I/O resource busy, probably claimed by ACPI\n");
else
vg->gpo_reserved = true;
vx855gpio_gpio_setup(vg);
ret = gpiochip_add(&vg->gpio);
if (ret) {
dev_err(&pdev->dev, "failed to register GPIOs\n");
goto out_release;
}
return 0;
out_release:
if (vg->gpi_reserved)
release_region(res_gpi->start, resource_size(res_gpi));
if (vg->gpo_reserved)
release_region(res_gpo->start, resource_size(res_gpo));
platform_set_drvdata(pdev, NULL);
kfree(vg);
return ret;
}
static int __devexit vx855gpio_remove(struct platform_device *pdev)
{
struct vx855_gpio *vg = platform_get_drvdata(pdev);
struct resource *res;
if (gpiochip_remove(&vg->gpio))
dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
if (vg->gpi_reserved) {
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
release_region(res->start, resource_size(res));
}
if (vg->gpo_reserved) {
res = platform_get_resource(pdev, IORESOURCE_IO, 1);
release_region(res->start, resource_size(res));
}
platform_set_drvdata(pdev, NULL);
kfree(vg);
return 0;
}
static struct platform_driver vx855gpio_driver = {
.driver = {
.name = MODULE_NAME,
.owner = THIS_MODULE,
},
.probe = vx855gpio_probe,
.remove = __devexit_p(vx855gpio_remove),
};
module_platform_driver(vx855gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
MODULE_DESCRIPTION("GPIO driver for the VIA VX855 chipset");
MODULE_ALIAS("platform:vx855_gpio");
| gpl-2.0 |
OpenFPGAduino/linux | arch/x86/math-emu/errors.c | 12243 | 18106 | /*---------------------------------------------------------------------------+
| errors.c |
| |
| The error handling functions for wm-FPU-emu |
| |
| Copyright (C) 1992,1993,1994,1996 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail billm@jacobi.maths.monash.edu.au |
| |
| |
+---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------+
| Note: |
| The file contains code which accesses user memory. |
| Emulator static data may change when user memory is accessed, due to |
| other processes using the emulator while swapping is in progress. |
+---------------------------------------------------------------------------*/
#include <linux/signal.h>
#include <asm/uaccess.h>
#include "fpu_emu.h"
#include "fpu_system.h"
#include "exception.h"
#include "status_w.h"
#include "control_w.h"
#include "reg_constant.h"
#include "version.h"
/* */
#undef PRINT_MESSAGES
/* */
#if 0
void Un_impl(void)
{
u_char byte1, FPU_modrm;
unsigned long address = FPU_ORIG_EIP;
RE_ENTRANT_CHECK_OFF;
/* No need to check access_ok(), we have previously fetched these bytes. */
printk("Unimplemented FPU Opcode at eip=%p : ", (void __user *)address);
if (FPU_CS == __USER_CS) {
while (1) {
FPU_get_user(byte1, (u_char __user *) address);
if ((byte1 & 0xf8) == 0xd8)
break;
printk("[%02x]", byte1);
address++;
}
printk("%02x ", byte1);
FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
if (FPU_modrm >= 0300)
printk("%02x (%02x+%d)\n", FPU_modrm, FPU_modrm & 0xf8,
FPU_modrm & 7);
else
printk("/%d\n", (FPU_modrm >> 3) & 7);
} else {
printk("cs selector = %04x\n", FPU_CS);
}
RE_ENTRANT_CHECK_ON;
EXCEPTION(EX_Invalid);
}
#endif /* 0 */
/*
Called for opcodes which are illegal and which are known to result in a
SIGILL with a real 80486.
*/
void FPU_illegal(void)
{
math_abort(FPU_info, SIGILL);
}
void FPU_printall(void)
{
int i;
static const char *tag_desc[] = { "Valid", "Zero", "ERROR", "Empty",
"DeNorm", "Inf", "NaN"
};
u_char byte1, FPU_modrm;
unsigned long address = FPU_ORIG_EIP;
RE_ENTRANT_CHECK_OFF;
/* No need to check access_ok(), we have previously fetched these bytes. */
printk("At %p:", (void *)address);
if (FPU_CS == __USER_CS) {
#define MAX_PRINTED_BYTES 20
for (i = 0; i < MAX_PRINTED_BYTES; i++) {
FPU_get_user(byte1, (u_char __user *) address);
if ((byte1 & 0xf8) == 0xd8) {
printk(" %02x", byte1);
break;
}
printk(" [%02x]", byte1);
address++;
}
if (i == MAX_PRINTED_BYTES)
printk(" [more..]\n");
else {
FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
if (FPU_modrm >= 0300)
printk(" %02x (%02x+%d)\n", FPU_modrm,
FPU_modrm & 0xf8, FPU_modrm & 7);
else
printk(" /%d, mod=%d rm=%d\n",
(FPU_modrm >> 3) & 7,
(FPU_modrm >> 6) & 3, FPU_modrm & 7);
}
} else {
printk("%04x\n", FPU_CS);
}
partial_status = status_word();
#ifdef DEBUGGING
if (partial_status & SW_Backward)
printk("SW: backward compatibility\n");
if (partial_status & SW_C3)
printk("SW: condition bit 3\n");
if (partial_status & SW_C2)
printk("SW: condition bit 2\n");
if (partial_status & SW_C1)
printk("SW: condition bit 1\n");
if (partial_status & SW_C0)
printk("SW: condition bit 0\n");
if (partial_status & SW_Summary)
printk("SW: exception summary\n");
if (partial_status & SW_Stack_Fault)
printk("SW: stack fault\n");
if (partial_status & SW_Precision)
printk("SW: loss of precision\n");
if (partial_status & SW_Underflow)
printk("SW: underflow\n");
if (partial_status & SW_Overflow)
printk("SW: overflow\n");
if (partial_status & SW_Zero_Div)
printk("SW: divide by zero\n");
if (partial_status & SW_Denorm_Op)
printk("SW: denormalized operand\n");
if (partial_status & SW_Invalid)
printk("SW: invalid operation\n");
#endif /* DEBUGGING */
printk(" SW: b=%d st=%d es=%d sf=%d cc=%d%d%d%d ef=%d%d%d%d%d%d\n", partial_status & 0x8000 ? 1 : 0, /* busy */
(partial_status & 0x3800) >> 11, /* stack top pointer */
partial_status & 0x80 ? 1 : 0, /* Error summary status */
partial_status & 0x40 ? 1 : 0, /* Stack flag */
partial_status & SW_C3 ? 1 : 0, partial_status & SW_C2 ? 1 : 0, /* cc */
partial_status & SW_C1 ? 1 : 0, partial_status & SW_C0 ? 1 : 0, /* cc */
partial_status & SW_Precision ? 1 : 0,
partial_status & SW_Underflow ? 1 : 0,
partial_status & SW_Overflow ? 1 : 0,
partial_status & SW_Zero_Div ? 1 : 0,
partial_status & SW_Denorm_Op ? 1 : 0,
partial_status & SW_Invalid ? 1 : 0);
printk(" CW: ic=%d rc=%d%d pc=%d%d iem=%d ef=%d%d%d%d%d%d\n",
control_word & 0x1000 ? 1 : 0,
(control_word & 0x800) >> 11, (control_word & 0x400) >> 10,
(control_word & 0x200) >> 9, (control_word & 0x100) >> 8,
control_word & 0x80 ? 1 : 0,
control_word & SW_Precision ? 1 : 0,
control_word & SW_Underflow ? 1 : 0,
control_word & SW_Overflow ? 1 : 0,
control_word & SW_Zero_Div ? 1 : 0,
control_word & SW_Denorm_Op ? 1 : 0,
control_word & SW_Invalid ? 1 : 0);
for (i = 0; i < 8; i++) {
FPU_REG *r = &st(i);
u_char tagi = FPU_gettagi(i);
switch (tagi) {
case TAG_Empty:
continue;
break;
case TAG_Zero:
case TAG_Special:
tagi = FPU_Special(r);
case TAG_Valid:
printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
getsign(r) ? '-' : '+',
(long)(r->sigh >> 16),
(long)(r->sigh & 0xFFFF),
(long)(r->sigl >> 16),
(long)(r->sigl & 0xFFFF),
exponent(r) - EXP_BIAS + 1);
break;
default:
printk("Whoops! Error in errors.c: tag%d is %d ", i,
tagi);
continue;
break;
}
printk("%s\n", tag_desc[(int)(unsigned)tagi]);
}
RE_ENTRANT_CHECK_ON;
}
static struct {
int type;
const char *name;
} exception_names[] = {
{
EX_StackOver, "stack overflow"}, {
EX_StackUnder, "stack underflow"}, {
EX_Precision, "loss of precision"}, {
EX_Underflow, "underflow"}, {
EX_Overflow, "overflow"}, {
EX_ZeroDiv, "divide by zero"}, {
EX_Denormal, "denormalized operand"}, {
EX_Invalid, "invalid operation"}, {
EX_INTERNAL, "INTERNAL BUG in " FPU_VERSION}, {
0, NULL}
};
/*
EX_INTERNAL is always given with a code which indicates where the
error was detected.
Internal error types:
0x14 in fpu_etc.c
0x1nn in a *.c file:
0x101 in reg_add_sub.c
0x102 in reg_mul.c
0x104 in poly_atan.c
0x105 in reg_mul.c
0x107 in fpu_trig.c
0x108 in reg_compare.c
0x109 in reg_compare.c
0x110 in reg_add_sub.c
0x111 in fpe_entry.c
0x112 in fpu_trig.c
0x113 in errors.c
0x115 in fpu_trig.c
0x116 in fpu_trig.c
0x117 in fpu_trig.c
0x118 in fpu_trig.c
0x119 in fpu_trig.c
0x120 in poly_atan.c
0x121 in reg_compare.c
0x122 in reg_compare.c
0x123 in reg_compare.c
0x125 in fpu_trig.c
0x126 in fpu_entry.c
0x127 in poly_2xm1.c
0x128 in fpu_entry.c
0x129 in fpu_entry.c
0x130 in get_address.c
0x131 in get_address.c
0x132 in get_address.c
0x133 in get_address.c
0x140 in load_store.c
0x141 in load_store.c
0x150 in poly_sin.c
0x151 in poly_sin.c
0x160 in reg_ld_str.c
0x161 in reg_ld_str.c
0x162 in reg_ld_str.c
0x163 in reg_ld_str.c
0x164 in reg_ld_str.c
0x170 in fpu_tags.c
0x171 in fpu_tags.c
0x172 in fpu_tags.c
0x180 in reg_convert.c
0x2nn in an *.S file:
0x201 in reg_u_add.S
0x202 in reg_u_div.S
0x203 in reg_u_div.S
0x204 in reg_u_div.S
0x205 in reg_u_mul.S
0x206 in reg_u_sub.S
0x207 in wm_sqrt.S
0x208 in reg_div.S
0x209 in reg_u_sub.S
0x210 in reg_u_sub.S
0x211 in reg_u_sub.S
0x212 in reg_u_sub.S
0x213 in wm_sqrt.S
0x214 in wm_sqrt.S
0x215 in wm_sqrt.S
0x220 in reg_norm.S
0x221 in reg_norm.S
0x230 in reg_round.S
0x231 in reg_round.S
0x232 in reg_round.S
0x233 in reg_round.S
0x234 in reg_round.S
0x235 in reg_round.S
0x236 in reg_round.S
0x240 in div_Xsig.S
0x241 in div_Xsig.S
0x242 in div_Xsig.S
*/
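/*
 * Illustrative sketch, not part of the emulator: FPU_exception() below is
 * handed either a plain exception mask or EX_INTERNAL combined with one of
 * the location codes listed above; callers raise internal errors like this,
 * and the handler recovers the code by subtracting EX_INTERNAL again
 * (kept under #if 0, documentation only).
 */
#if 0
static void example_internal_error(void)
{
	EXCEPTION(EX_INTERNAL | 0x113);	/* 0x113: detected in errors.c */
}
#endif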
asmlinkage void FPU_exception(int n)
{
int i, int_type;
int_type = 0; /* Needed only to stop compiler warnings */
if (n & EX_INTERNAL) {
int_type = n - EX_INTERNAL;
n = EX_INTERNAL;
/* Set lots of exception bits! */
partial_status |= (SW_Exc_Mask | SW_Summary | SW_Backward);
} else {
/* Extract only the bits which we use to set the status word */
n &= (SW_Exc_Mask);
/* Set the corresponding exception bit */
partial_status |= n;
/* Set summary bits iff exception isn't masked */
if (partial_status & ~control_word & CW_Exceptions)
partial_status |= (SW_Summary | SW_Backward);
if (n & (SW_Stack_Fault | EX_Precision)) {
if (!(n & SW_C1))
/* This bit distinguishes over- from underflow for a stack fault,
and roundup from round-down for precision loss. */
partial_status &= ~SW_C1;
}
}
RE_ENTRANT_CHECK_OFF;
if ((~control_word & n & CW_Exceptions) || (n == EX_INTERNAL)) {
#ifdef PRINT_MESSAGES
/* My message from the sponsor */
printk(FPU_VERSION " " __DATE__ " (C) W. Metzenthen.\n");
#endif /* PRINT_MESSAGES */
/* Get a name string for error reporting */
for (i = 0; exception_names[i].type; i++)
if ((exception_names[i].type & n) ==
exception_names[i].type)
break;
if (exception_names[i].type) {
#ifdef PRINT_MESSAGES
printk("FP Exception: %s!\n", exception_names[i].name);
#endif /* PRINT_MESSAGES */
} else
printk("FPU emulator: Unknown Exception: 0x%04x!\n", n);
if (n == EX_INTERNAL) {
printk("FPU emulator: Internal error type 0x%04x\n",
int_type);
FPU_printall();
}
#ifdef PRINT_MESSAGES
else
FPU_printall();
#endif /* PRINT_MESSAGES */
/*
* The 80486 generates an interrupt on the next non-control FPU
* instruction. So we need some means of flagging it.
* We use the ES (Error Summary) bit for this.
*/
}
RE_ENTRANT_CHECK_ON;
#ifdef __DEBUG__
math_abort(FPU_info, SIGFPE);
#endif /* __DEBUG__ */
}
/* Real operation attempted on a NaN. */
/* Returns < 0 if the exception is unmasked */
int real_1op_NaN(FPU_REG *a)
{
int signalling, isNaN;
isNaN = (exponent(a) == EXP_OVER) && (a->sigh & 0x80000000);
/* The default result for the case of two "equal" NaNs (signs may
differ) is chosen to reproduce 80486 behaviour */
signalling = isNaN && !(a->sigh & 0x40000000);
if (!signalling) {
if (!isNaN) { /* pseudo-NaN, or other unsupported? */
if (control_word & CW_Invalid) {
/* Masked response */
reg_copy(&CONST_QNaN, a);
}
EXCEPTION(EX_Invalid);
return (!(control_word & CW_Invalid) ? FPU_Exception :
0) | TAG_Special;
}
return TAG_Special;
}
if (control_word & CW_Invalid) {
/* The masked response */
if (!(a->sigh & 0x80000000)) { /* pseudo-NaN ? */
reg_copy(&CONST_QNaN, a);
}
/* ensure a Quiet NaN */
a->sigh |= 0x40000000;
}
EXCEPTION(EX_Invalid);
return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
}
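/*
 * Illustrative sketch, not part of the emulator: real_1op_NaN() above keys
 * off the two top bits of the significand. For a supported extended-format
 * NaN the explicit integer bit (sigh & 0x80000000) must be set, and a clear
 * quiet bit (sigh & 0x40000000) marks the NaN as signalling (kept under
 * #if 0, documentation only).
 */
#if 0
static int example_is_signalling_nan(FPU_REG const *r)
{
	return (exponent(r) == EXP_OVER)
		&& (r->sigh & 0x80000000)	/* supported NaN encoding */
		&& !(r->sigh & 0x40000000);	/* quiet bit clear */
}
#endif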
/* Real operation attempted on two operands, one a NaN. */
/* Returns < 0 if the exception is unmasked */
int real_2op_NaN(FPU_REG const *b, u_char tagb,
int deststnr, FPU_REG const *defaultNaN)
{
FPU_REG *dest = &st(deststnr);
FPU_REG const *a = dest;
u_char taga = FPU_gettagi(deststnr);
FPU_REG const *x;
int signalling, unsupported;
if (taga == TAG_Special)
taga = FPU_Special(a);
if (tagb == TAG_Special)
tagb = FPU_Special(b);
/* TW_NaN is also used for unsupported data types. */
unsupported = ((taga == TW_NaN)
&& !((exponent(a) == EXP_OVER)
&& (a->sigh & 0x80000000)))
|| ((tagb == TW_NaN)
&& !((exponent(b) == EXP_OVER) && (b->sigh & 0x80000000)));
if (unsupported) {
if (control_word & CW_Invalid) {
/* Masked response */
FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
}
EXCEPTION(EX_Invalid);
return (!(control_word & CW_Invalid) ? FPU_Exception : 0) |
TAG_Special;
}
if (taga == TW_NaN) {
x = a;
if (tagb == TW_NaN) {
signalling = !(a->sigh & b->sigh & 0x40000000);
if (significand(b) > significand(a))
x = b;
else if (significand(b) == significand(a)) {
/* The default result for the case of two "equal" NaNs (signs may
differ) is chosen to reproduce 80486 behaviour */
x = defaultNaN;
}
} else {
/* return the quiet version of the NaN in a */
signalling = !(a->sigh & 0x40000000);
}
} else
#ifdef PARANOID
if (tagb == TW_NaN)
#endif /* PARANOID */
{
signalling = !(b->sigh & 0x40000000);
x = b;
}
#ifdef PARANOID
else {
signalling = 0;
EXCEPTION(EX_INTERNAL | 0x113);
x = &CONST_QNaN;
}
#endif /* PARANOID */
if ((!signalling) || (control_word & CW_Invalid)) {
if (!x)
x = b;
if (!(x->sigh & 0x80000000)) /* pseudo-NaN ? */
x = &CONST_QNaN;
FPU_copy_to_regi(x, TAG_Special, deststnr);
if (!signalling)
return TAG_Special;
/* ensure a Quiet NaN */
dest->sigh |= 0x40000000;
}
EXCEPTION(EX_Invalid);
return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
}
/* Invalid arith operation on Valid registers */
/* Returns < 0 if the exception is unmasked */
asmlinkage int arith_invalid(int deststnr)
{
EXCEPTION(EX_Invalid);
if (control_word & CW_Invalid) {
/* The masked response */
FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
}
return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Valid;
}
/* Divide a finite number by zero */
asmlinkage int FPU_divide_by_zero(int deststnr, u_char sign)
{
FPU_REG *dest = &st(deststnr);
int tag = TAG_Valid;
if (control_word & CW_ZeroDiv) {
/* The masked response */
FPU_copy_to_regi(&CONST_INF, TAG_Special, deststnr);
setsign(dest, sign);
tag = TAG_Special;
}
EXCEPTION(EX_ZeroDiv);
return (!(control_word & CW_ZeroDiv) ? FPU_Exception : 0) | tag;
}
/* This may be called often, so keep it lean */
int set_precision_flag(int flags)
{
if (control_word & CW_Precision) {
partial_status &= ~(SW_C1 & flags);
partial_status |= flags; /* The masked response */
return 0;
} else {
EXCEPTION(flags);
return 1;
}
}
/* This may be called often, so keep it lean */
asmlinkage void set_precision_flag_up(void)
{
if (control_word & CW_Precision)
partial_status |= (SW_Precision | SW_C1); /* The masked response */
else
EXCEPTION(EX_Precision | SW_C1);
}
/* This may be called often, so keep it lean */
asmlinkage void set_precision_flag_down(void)
{
if (control_word & CW_Precision) { /* The masked response */
partial_status &= ~SW_C1;
partial_status |= SW_Precision;
} else
EXCEPTION(EX_Precision);
}
asmlinkage int denormal_operand(void)
{
if (control_word & CW_Denormal) { /* The masked response */
partial_status |= SW_Denorm_Op;
return TAG_Special;
} else {
EXCEPTION(EX_Denormal);
return TAG_Special | FPU_Exception;
}
}
asmlinkage int arith_overflow(FPU_REG *dest)
{
int tag = TAG_Valid;
if (control_word & CW_Overflow) {
/* The masked response */
/* ###### The response here depends upon the rounding mode */
reg_copy(&CONST_INF, dest);
tag = TAG_Special;
} else {
/* Subtract the magic number from the exponent */
addexponent(dest, (-3 * (1 << 13)));
}
EXCEPTION(EX_Overflow);
if (control_word & CW_Overflow) {
/* The overflow exception is masked. */
/* By definition, precision is lost.
The roundup bit (C1) is also set because we have
"rounded" upwards to Infinity. */
EXCEPTION(EX_Precision | SW_C1);
return tag;
}
return tag;
}
asmlinkage int arith_underflow(FPU_REG *dest)
{
int tag = TAG_Valid;
if (control_word & CW_Underflow) {
/* The masked response */
if (exponent16(dest) <= EXP_UNDER - 63) {
reg_copy(&CONST_Z, dest);
partial_status &= ~SW_C1; /* Round down. */
tag = TAG_Zero;
} else {
stdexp(dest);
}
} else {
/* Add the magic number to the exponent. */
addexponent(dest, (3 * (1 << 13)) + EXTENDED_Ebias);
}
EXCEPTION(EX_Underflow);
if (control_word & CW_Underflow) {
/* The underflow exception is masked. */
EXCEPTION(EX_Precision);
return tag;
}
return tag;
}
void FPU_stack_overflow(void)
{
if (control_word & CW_Invalid) {
/* The masked response */
top--;
FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
}
EXCEPTION(EX_StackOver);
return;
}
void FPU_stack_underflow(void)
{
if (control_word & CW_Invalid) {
/* The masked response */
FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
}
EXCEPTION(EX_StackUnder);
return;
}
void FPU_stack_underflow_i(int i)
{
if (control_word & CW_Invalid) {
/* The masked response */
FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
}
EXCEPTION(EX_StackUnder);
return;
}
void FPU_stack_underflow_pop(int i)
{
if (control_word & CW_Invalid) {
/* The masked response */
FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
FPU_pop();
}
EXCEPTION(EX_StackUnder);
return;
}
| gpl-2.0 |
venkatarajasekhar/kernel_raybst | arch/x86/math-emu/errors.c | 12243 | 18106 | /*---------------------------------------------------------------------------+
| errors.c |
| |
| The error handling functions for wm-FPU-emu |
| |
| Copyright (C) 1992,1993,1994,1996 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail billm@jacobi.maths.monash.edu.au |
| |
| |
+---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------+
| Note: |
| The file contains code which accesses user memory. |
| Emulator static data may change when user memory is accessed, due to |
| other processes using the emulator while swapping is in progress. |
+---------------------------------------------------------------------------*/
#include <linux/signal.h>
#include <asm/uaccess.h>
#include "fpu_emu.h"
#include "fpu_system.h"
#include "exception.h"
#include "status_w.h"
#include "control_w.h"
#include "reg_constant.h"
#include "version.h"
/* */
#undef PRINT_MESSAGES
/* */
#if 0
void Un_impl(void)
{
u_char byte1, FPU_modrm;
unsigned long address = FPU_ORIG_EIP;
RE_ENTRANT_CHECK_OFF;
/* No need to check access_ok(), we have previously fetched these bytes. */
printk("Unimplemented FPU Opcode at eip=%p : ", (void __user *)address);
if (FPU_CS == __USER_CS) {
while (1) {
FPU_get_user(byte1, (u_char __user *) address);
if ((byte1 & 0xf8) == 0xd8)
break;
printk("[%02x]", byte1);
address++;
}
printk("%02x ", byte1);
FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
if (FPU_modrm >= 0300)
printk("%02x (%02x+%d)\n", FPU_modrm, FPU_modrm & 0xf8,
FPU_modrm & 7);
else
printk("/%d\n", (FPU_modrm >> 3) & 7);
} else {
printk("cs selector = %04x\n", FPU_CS);
}
RE_ENTRANT_CHECK_ON;
EXCEPTION(EX_Invalid);
}
#endif /* 0 */
/*
Called for opcodes which are illegal and which are known to result in a
SIGILL with a real 80486.
*/
void FPU_illegal(void)
{
math_abort(FPU_info, SIGILL);
}
void FPU_printall(void)
{
int i;
static const char *tag_desc[] = { "Valid", "Zero", "ERROR", "Empty",
"DeNorm", "Inf", "NaN"
};
u_char byte1, FPU_modrm;
unsigned long address = FPU_ORIG_EIP;
RE_ENTRANT_CHECK_OFF;
/* No need to check access_ok(), we have previously fetched these bytes. */
printk("At %p:", (void *)address);
if (FPU_CS == __USER_CS) {
#define MAX_PRINTED_BYTES 20
for (i = 0; i < MAX_PRINTED_BYTES; i++) {
FPU_get_user(byte1, (u_char __user *) address);
if ((byte1 & 0xf8) == 0xd8) {
printk(" %02x", byte1);
break;
}
printk(" [%02x]", byte1);
address++;
}
if (i == MAX_PRINTED_BYTES)
printk(" [more..]\n");
else {
FPU_get_user(FPU_modrm, 1 + (u_char __user *) address);
if (FPU_modrm >= 0300)
printk(" %02x (%02x+%d)\n", FPU_modrm,
FPU_modrm & 0xf8, FPU_modrm & 7);
else
printk(" /%d, mod=%d rm=%d\n",
(FPU_modrm >> 3) & 7,
(FPU_modrm >> 6) & 3, FPU_modrm & 7);
}
} else {
printk("%04x\n", FPU_CS);
}
partial_status = status_word();
#ifdef DEBUGGING
if (partial_status & SW_Backward)
printk("SW: backward compatibility\n");
if (partial_status & SW_C3)
printk("SW: condition bit 3\n");
if (partial_status & SW_C2)
printk("SW: condition bit 2\n");
if (partial_status & SW_C1)
printk("SW: condition bit 1\n");
if (partial_status & SW_C0)
printk("SW: condition bit 0\n");
if (partial_status & SW_Summary)
printk("SW: exception summary\n");
if (partial_status & SW_Stack_Fault)
printk("SW: stack fault\n");
if (partial_status & SW_Precision)
printk("SW: loss of precision\n");
if (partial_status & SW_Underflow)
printk("SW: underflow\n");
if (partial_status & SW_Overflow)
printk("SW: overflow\n");
if (partial_status & SW_Zero_Div)
printk("SW: divide by zero\n");
if (partial_status & SW_Denorm_Op)
printk("SW: denormalized operand\n");
if (partial_status & SW_Invalid)
printk("SW: invalid operation\n");
#endif /* DEBUGGING */
printk(" SW: b=%d st=%d es=%d sf=%d cc=%d%d%d%d ef=%d%d%d%d%d%d\n", partial_status & 0x8000 ? 1 : 0, /* busy */
(partial_status & 0x3800) >> 11, /* stack top pointer */
partial_status & 0x80 ? 1 : 0, /* Error summary status */
partial_status & 0x40 ? 1 : 0, /* Stack flag */
partial_status & SW_C3 ? 1 : 0, partial_status & SW_C2 ? 1 : 0, /* cc */
partial_status & SW_C1 ? 1 : 0, partial_status & SW_C0 ? 1 : 0, /* cc */
partial_status & SW_Precision ? 1 : 0,
partial_status & SW_Underflow ? 1 : 0,
partial_status & SW_Overflow ? 1 : 0,
partial_status & SW_Zero_Div ? 1 : 0,
partial_status & SW_Denorm_Op ? 1 : 0,
partial_status & SW_Invalid ? 1 : 0);
printk(" CW: ic=%d rc=%d%d pc=%d%d iem=%d ef=%d%d%d%d%d%d\n",
control_word & 0x1000 ? 1 : 0,
(control_word & 0x800) >> 11, (control_word & 0x400) >> 10,
(control_word & 0x200) >> 9, (control_word & 0x100) >> 8,
control_word & 0x80 ? 1 : 0,
control_word & SW_Precision ? 1 : 0,
control_word & SW_Underflow ? 1 : 0,
control_word & SW_Overflow ? 1 : 0,
control_word & SW_Zero_Div ? 1 : 0,
control_word & SW_Denorm_Op ? 1 : 0,
control_word & SW_Invalid ? 1 : 0);
for (i = 0; i < 8; i++) {
FPU_REG *r = &st(i);
u_char tagi = FPU_gettagi(i);
switch (tagi) {
case TAG_Empty:
continue;
break;
case TAG_Zero:
case TAG_Special:
tagi = FPU_Special(r);
case TAG_Valid:
printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
getsign(r) ? '-' : '+',
(long)(r->sigh >> 16),
(long)(r->sigh & 0xFFFF),
(long)(r->sigl >> 16),
(long)(r->sigl & 0xFFFF),
exponent(r) - EXP_BIAS + 1);
break;
default:
printk("Whoops! Error in errors.c: tag%d is %d ", i,
tagi);
continue;
break;
}
printk("%s\n", tag_desc[(int)(unsigned)tagi]);
}
RE_ENTRANT_CHECK_ON;
}
static struct {
int type;
const char *name;
} exception_names[] = {
{
EX_StackOver, "stack overflow"}, {
EX_StackUnder, "stack underflow"}, {
EX_Precision, "loss of precision"}, {
EX_Underflow, "underflow"}, {
EX_Overflow, "overflow"}, {
EX_ZeroDiv, "divide by zero"}, {
EX_Denormal, "denormalized operand"}, {
EX_Invalid, "invalid operation"}, {
EX_INTERNAL, "INTERNAL BUG in " FPU_VERSION}, {
0, NULL}
};
/*
EX_INTERNAL is always given with a code which indicates where the
error was detected.
Internal error types:
0x14 in fpu_etc.c
0x1nn in a *.c file:
0x101 in reg_add_sub.c
0x102 in reg_mul.c
0x104 in poly_atan.c
0x105 in reg_mul.c
0x107 in fpu_trig.c
0x108 in reg_compare.c
0x109 in reg_compare.c
0x110 in reg_add_sub.c
0x111 in fpe_entry.c
0x112 in fpu_trig.c
0x113 in errors.c
0x115 in fpu_trig.c
0x116 in fpu_trig.c
0x117 in fpu_trig.c
0x118 in fpu_trig.c
0x119 in fpu_trig.c
0x120 in poly_atan.c
0x121 in reg_compare.c
0x122 in reg_compare.c
0x123 in reg_compare.c
0x125 in fpu_trig.c
0x126 in fpu_entry.c
0x127 in poly_2xm1.c
0x128 in fpu_entry.c
0x129 in fpu_entry.c
0x130 in get_address.c
0x131 in get_address.c
0x132 in get_address.c
0x133 in get_address.c
0x140 in load_store.c
0x141 in load_store.c
0x150 in poly_sin.c
0x151 in poly_sin.c
0x160 in reg_ld_str.c
0x161 in reg_ld_str.c
0x162 in reg_ld_str.c
0x163 in reg_ld_str.c
0x164 in reg_ld_str.c
0x170 in fpu_tags.c
0x171 in fpu_tags.c
0x172 in fpu_tags.c
0x180 in reg_convert.c
0x2nn in an *.S file:
0x201 in reg_u_add.S
0x202 in reg_u_div.S
0x203 in reg_u_div.S
0x204 in reg_u_div.S
0x205 in reg_u_mul.S
0x206 in reg_u_sub.S
0x207 in wm_sqrt.S
0x208 in reg_div.S
0x209 in reg_u_sub.S
0x210 in reg_u_sub.S
0x211 in reg_u_sub.S
0x212 in reg_u_sub.S
0x213 in wm_sqrt.S
0x214 in wm_sqrt.S
0x215 in wm_sqrt.S
0x220 in reg_norm.S
0x221 in reg_norm.S
0x230 in reg_round.S
0x231 in reg_round.S
0x232 in reg_round.S
0x233 in reg_round.S
0x234 in reg_round.S
0x235 in reg_round.S
0x236 in reg_round.S
0x240 in div_Xsig.S
0x241 in div_Xsig.S
0x242 in div_Xsig.S
*/
asmlinkage void FPU_exception(int n)
{
int i, int_type;
int_type = 0; /* Needed only to stop compiler warnings */
if (n & EX_INTERNAL) {
int_type = n - EX_INTERNAL;
n = EX_INTERNAL;
/* Set lots of exception bits! */
partial_status |= (SW_Exc_Mask | SW_Summary | SW_Backward);
} else {
/* Extract only the bits which we use to set the status word */
n &= (SW_Exc_Mask);
/* Set the corresponding exception bit */
partial_status |= n;
/* Set summary bits iff exception isn't masked */
if (partial_status & ~control_word & CW_Exceptions)
partial_status |= (SW_Summary | SW_Backward);
if (n & (SW_Stack_Fault | EX_Precision)) {
if (!(n & SW_C1))
/* This bit distinguishes over- from underflow for a stack fault,
and roundup from round-down for precision loss. */
partial_status &= ~SW_C1;
}
}
RE_ENTRANT_CHECK_OFF;
if ((~control_word & n & CW_Exceptions) || (n == EX_INTERNAL)) {
#ifdef PRINT_MESSAGES
/* My message from the sponsor */
printk(FPU_VERSION " " __DATE__ " (C) W. Metzenthen.\n");
#endif /* PRINT_MESSAGES */
/* Get a name string for error reporting */
for (i = 0; exception_names[i].type; i++)
if ((exception_names[i].type & n) ==
exception_names[i].type)
break;
if (exception_names[i].type) {
#ifdef PRINT_MESSAGES
printk("FP Exception: %s!\n", exception_names[i].name);
#endif /* PRINT_MESSAGES */
} else
printk("FPU emulator: Unknown Exception: 0x%04x!\n", n);
if (n == EX_INTERNAL) {
printk("FPU emulator: Internal error type 0x%04x\n",
int_type);
FPU_printall();
}
#ifdef PRINT_MESSAGES
else
FPU_printall();
#endif /* PRINT_MESSAGES */
/*
* The 80486 generates an interrupt on the next non-control FPU
* instruction. So we need some means of flagging it.
* We use the ES (Error Summary) bit for this.
*/
}
RE_ENTRANT_CHECK_ON;
#ifdef __DEBUG__
math_abort(FPU_info, SIGFPE);
#endif /* __DEBUG__ */
}
/* Real operation attempted on a NaN. */
/* Returns < 0 if the exception is unmasked */
int real_1op_NaN(FPU_REG *a)
{
int signalling, isNaN;
isNaN = (exponent(a) == EXP_OVER) && (a->sigh & 0x80000000);
/* The default result for the case of two "equal" NaNs (signs may
differ) is chosen to reproduce 80486 behaviour */
signalling = isNaN && !(a->sigh & 0x40000000);
if (!signalling) {
if (!isNaN) { /* pseudo-NaN, or other unsupported? */
if (control_word & CW_Invalid) {
/* Masked response */
reg_copy(&CONST_QNaN, a);
}
EXCEPTION(EX_Invalid);
return (!(control_word & CW_Invalid) ? FPU_Exception :
0) | TAG_Special;
}
return TAG_Special;
}
if (control_word & CW_Invalid) {
/* The masked response */
if (!(a->sigh & 0x80000000)) { /* pseudo-NaN ? */
reg_copy(&CONST_QNaN, a);
}
/* ensure a Quiet NaN */
a->sigh |= 0x40000000;
}
EXCEPTION(EX_Invalid);
return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
}
/* Real operation attempted on two operands, one a NaN. */
/* Returns < 0 if the exception is unmasked */
int real_2op_NaN(FPU_REG const *b, u_char tagb,
int deststnr, FPU_REG const *defaultNaN)
{
FPU_REG *dest = &st(deststnr);
FPU_REG const *a = dest;
u_char taga = FPU_gettagi(deststnr);
FPU_REG const *x;
int signalling, unsupported;
if (taga == TAG_Special)
taga = FPU_Special(a);
if (tagb == TAG_Special)
tagb = FPU_Special(b);
/* TW_NaN is also used for unsupported data types. */
unsupported = ((taga == TW_NaN)
&& !((exponent(a) == EXP_OVER)
&& (a->sigh & 0x80000000)))
|| ((tagb == TW_NaN)
&& !((exponent(b) == EXP_OVER) && (b->sigh & 0x80000000)));
if (unsupported) {
if (control_word & CW_Invalid) {
/* Masked response */
FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
}
EXCEPTION(EX_Invalid);
return (!(control_word & CW_Invalid) ? FPU_Exception : 0) |
TAG_Special;
}
if (taga == TW_NaN) {
x = a;
if (tagb == TW_NaN) {
signalling = !(a->sigh & b->sigh & 0x40000000);
if (significand(b) > significand(a))
x = b;
else if (significand(b) == significand(a)) {
/* The default result for the case of two "equal" NaNs (signs may
differ) is chosen to reproduce 80486 behaviour */
x = defaultNaN;
}
} else {
/* return the quiet version of the NaN in a */
signalling = !(a->sigh & 0x40000000);
}
} else
#ifdef PARANOID
if (tagb == TW_NaN)
#endif /* PARANOID */
{
signalling = !(b->sigh & 0x40000000);
x = b;
}
#ifdef PARANOID
else {
signalling = 0;
EXCEPTION(EX_INTERNAL | 0x113);
x = &CONST_QNaN;
}
#endif /* PARANOID */
if ((!signalling) || (control_word & CW_Invalid)) {
if (!x)
x = b;
if (!(x->sigh & 0x80000000)) /* pseudo-NaN ? */
x = &CONST_QNaN;
FPU_copy_to_regi(x, TAG_Special, deststnr);
if (!signalling)
return TAG_Special;
/* ensure a Quiet NaN */
dest->sigh |= 0x40000000;
}
EXCEPTION(EX_Invalid);
return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Special;
}
/* Invalid arith operation on Valid registers */
/* Returns < 0 if the exception is unmasked */
asmlinkage int arith_invalid(int deststnr)
{
EXCEPTION(EX_Invalid);
if (control_word & CW_Invalid) {
/* The masked response */
FPU_copy_to_regi(&CONST_QNaN, TAG_Special, deststnr);
}
return (!(control_word & CW_Invalid) ? FPU_Exception : 0) | TAG_Valid;
}
/* Divide a finite number by zero */
asmlinkage int FPU_divide_by_zero(int deststnr, u_char sign)
{
FPU_REG *dest = &st(deststnr);
int tag = TAG_Valid;
if (control_word & CW_ZeroDiv) {
/* The masked response */
FPU_copy_to_regi(&CONST_INF, TAG_Special, deststnr);
setsign(dest, sign);
tag = TAG_Special;
}
EXCEPTION(EX_ZeroDiv);
return (!(control_word & CW_ZeroDiv) ? FPU_Exception : 0) | tag;
}
/* This may be called often, so keep it lean */
int set_precision_flag(int flags)
{
if (control_word & CW_Precision) {
partial_status &= ~(SW_C1 & flags);
partial_status |= flags; /* The masked response */
return 0;
} else {
EXCEPTION(flags);
return 1;
}
}
/* This may be called often, so keep it lean */
asmlinkage void set_precision_flag_up(void)
{
if (control_word & CW_Precision)
partial_status |= (SW_Precision | SW_C1); /* The masked response */
else
EXCEPTION(EX_Precision | SW_C1);
}
/* This may be called often, so keep it lean */
asmlinkage void set_precision_flag_down(void)
{
if (control_word & CW_Precision) { /* The masked response */
partial_status &= ~SW_C1;
partial_status |= SW_Precision;
} else
EXCEPTION(EX_Precision);
}
asmlinkage int denormal_operand(void)
{
if (control_word & CW_Denormal) { /* The masked response */
partial_status |= SW_Denorm_Op;
return TAG_Special;
} else {
EXCEPTION(EX_Denormal);
return TAG_Special | FPU_Exception;
}
}
asmlinkage int arith_overflow(FPU_REG *dest)
{
int tag = TAG_Valid;
if (control_word & CW_Overflow) {
/* The masked response */
/* ###### The response here depends upon the rounding mode */
reg_copy(&CONST_INF, dest);
tag = TAG_Special;
} else {
/* Subtract the magic number from the exponent */
addexponent(dest, (-3 * (1 << 13)));
}
EXCEPTION(EX_Overflow);
if (control_word & CW_Overflow) {
/* The overflow exception is masked. */
/* By definition, precision is lost.
The roundup bit (C1) is also set because we have
"rounded" upwards to Infinity. */
EXCEPTION(EX_Precision | SW_C1);
return tag;
}
return tag;
}
asmlinkage int arith_underflow(FPU_REG *dest)
{
int tag = TAG_Valid;
if (control_word & CW_Underflow) {
/* The masked response */
if (exponent16(dest) <= EXP_UNDER - 63) {
reg_copy(&CONST_Z, dest);
partial_status &= ~SW_C1; /* Round down. */
tag = TAG_Zero;
} else {
stdexp(dest);
}
} else {
/* Add the magic number to the exponent. */
addexponent(dest, (3 * (1 << 13)) + EXTENDED_Ebias);
}
EXCEPTION(EX_Underflow);
if (control_word & CW_Underflow) {
/* The underflow exception is masked. */
EXCEPTION(EX_Precision);
return tag;
}
return tag;
}
void FPU_stack_overflow(void)
{
if (control_word & CW_Invalid) {
/* The masked response */
top--;
FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
}
EXCEPTION(EX_StackOver);
return;
}
void FPU_stack_underflow(void)
{
if (control_word & CW_Invalid) {
/* The masked response */
FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
}
EXCEPTION(EX_StackUnder);
return;
}
void FPU_stack_underflow_i(int i)
{
if (control_word & CW_Invalid) {
/* The masked response */
FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
}
EXCEPTION(EX_StackUnder);
return;
}
void FPU_stack_underflow_pop(int i)
{
if (control_word & CW_Invalid) {
/* The masked response */
FPU_copy_to_regi(&CONST_QNaN, TAG_Special, i);
FPU_pop();
}
EXCEPTION(EX_StackUnder);
return;
}
| gpl-2.0 |
fredvj/kernel_huawei_u8860 | arch/blackfin/kernel/flat.c | 13523 | 2088 | /*
* Copyright 2007 Analog Devices Inc.
*
* Licensed under the GPL-2.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/flat.h>
#define FLAT_BFIN_RELOC_TYPE_16_BIT 0
#define FLAT_BFIN_RELOC_TYPE_16H_BIT 1
#define FLAT_BFIN_RELOC_TYPE_32_BIT 2
unsigned long bfin_get_addr_from_rp(unsigned long *ptr,
unsigned long relval,
unsigned long flags,
unsigned long *persistent)
{
unsigned short *usptr = (unsigned short *)ptr;
int type = (relval >> 26) & 7;
unsigned long val;
switch (type) {
case FLAT_BFIN_RELOC_TYPE_16_BIT:
case FLAT_BFIN_RELOC_TYPE_16H_BIT:
usptr = (unsigned short *)ptr;
pr_debug("*usptr = %x", get_unaligned(usptr));
val = get_unaligned(usptr);
val += *persistent;
break;
case FLAT_BFIN_RELOC_TYPE_32_BIT:
pr_debug("*ptr = %lx", get_unaligned(ptr));
val = get_unaligned(ptr);
break;
default:
pr_debug("BINFMT_FLAT: Unknown relocation type %x\n", type);
return 0;
}
/*
* Stack-relative relocs contain the offset into the stack, we
* have to add the stack's start address here and return 1 from
* flat_addr_absolute to prevent the normal address calculations
*/
if (relval & (1 << 29))
return val + current->mm->context.end_brk;
if ((flags & FLAT_FLAG_GOTPIC) == 0)
val = htonl(val);
return val;
}
EXPORT_SYMBOL(bfin_get_addr_from_rp);
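/*
 * Illustrative sketch, not part of the kernel code: both helpers in this
 * file decode the same fields out of the raw relocation word - bits 26..28
 * select the Blackfin relocation type and bit 29 flags a stack-relative
 * entry, as seen in bfin_get_addr_from_rp() above (kept under #if 0,
 * documentation only).
 */
#if 0
static inline int example_bfin_reloc_type(unsigned long relval)
{
	return (relval >> 26) & 7;
}

static inline int example_bfin_reloc_is_stack_relative(unsigned long relval)
{
	return (relval >> 29) & 1;
}
#endif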
/*
* Insert the address ADDR into the symbol reference at RP;
* RELVAL is the raw relocation-table entry from which RP is derived
*/
void bfin_put_addr_at_rp(unsigned long *ptr, unsigned long addr,
unsigned long relval)
{
unsigned short *usptr = (unsigned short *)ptr;
int type = (relval >> 26) & 7;
switch (type) {
case FLAT_BFIN_RELOC_TYPE_16_BIT:
put_unaligned(addr, usptr);
pr_debug("new value %x at %p", get_unaligned(usptr), usptr);
break;
case FLAT_BFIN_RELOC_TYPE_16H_BIT:
put_unaligned(addr >> 16, usptr);
pr_debug("new value %x", get_unaligned(usptr));
break;
case FLAT_BFIN_RELOC_TYPE_32_BIT:
put_unaligned(addr, ptr);
pr_debug("new ptr =%lx", get_unaligned(ptr));
break;
}
}
EXPORT_SYMBOL(bfin_put_addr_at_rp);
| gpl-2.0 |
navigator117/chromiumos-third_party-kernel | drivers/mtd/maps/pxa2xx-flash.c | 468 | 4322 | /*
* Map driver for Intel XScale PXA2xx platforms.
*
* Author: Nicolas Pitre
* Copyright: (C) 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#include <mach/hardware.h>
#include <asm/cacheflush.h>
#include <asm/mach/flash.h>
static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
ssize_t len)
{
flush_ioremap_region(map->phys, map->cached, from, len);
}
struct pxa2xx_flash_info {
struct mtd_partition *parts;
int nr_parts;
struct mtd_info *mtd;
struct map_info map;
};
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
static int __init pxa2xx_flash_probe(struct platform_device *pdev)
{
struct flash_platform_data *flash = pdev->dev.platform_data;
struct pxa2xx_flash_info *info;
struct mtd_partition *parts;
struct resource *res;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
info = kzalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->map.name = (char *) flash->name;
info->map.bankwidth = flash->width;
info->map.phys = res->start;
info->map.size = res->end - res->start + 1;
info->parts = flash->parts;
info->nr_parts = flash->nr_parts;
info->map.virt = ioremap(info->map.phys, info->map.size);
if (!info->map.virt) {
printk(KERN_WARNING "Failed to ioremap %s\n",
info->map.name);
return -ENOMEM;
}
info->map.cached =
ioremap_cached(info->map.phys, info->map.size);
if (!info->map.cached)
printk(KERN_WARNING "Failed to ioremap cached %s\n",
info->map.name);
info->map.inval_cache = pxa2xx_map_inval_cache;
simple_map_init(&info->map);
printk(KERN_NOTICE
"Probing %s at physical address 0x%08lx"
" (%d-bit bankwidth)\n",
info->map.name, (unsigned long)info->map.phys,
info->map.bankwidth * 8);
info->mtd = do_map_probe(flash->map_name, &info->map);
if (!info->mtd) {
iounmap((void *)info->map.virt);
if (info->map.cached)
iounmap(info->map.cached);
return -EIO;
}
info->mtd->owner = THIS_MODULE;
#ifdef CONFIG_MTD_PARTITIONS
ret = parse_mtd_partitions(info->mtd, probes, &parts, 0);
if (ret > 0) {
info->nr_parts = ret;
info->parts = parts;
}
#endif
if (info->nr_parts) {
add_mtd_partitions(info->mtd, info->parts,
info->nr_parts);
} else {
printk("Registering %s as whole device\n",
info->map.name);
add_mtd_device(info->mtd);
}
platform_set_drvdata(pdev, info);
return 0;
}
static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
platform_set_drvdata(dev, NULL);
#ifdef CONFIG_MTD_PARTITIONS
if (info->nr_parts)
del_mtd_partitions(info->mtd);
else
#endif
del_mtd_device(info->mtd);
map_destroy(info->mtd);
iounmap(info->map.virt);
if (info->map.cached)
iounmap(info->map.cached);
kfree(info->parts);
kfree(info);
return 0;
}
#ifdef CONFIG_PM
static void pxa2xx_flash_shutdown(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
if (info && info->mtd->suspend(info->mtd) == 0)
info->mtd->resume(info->mtd);
}
#else
#define pxa2xx_flash_shutdown NULL
#endif
static struct platform_driver pxa2xx_flash_driver = {
.driver = {
.name = "pxa2xx-flash",
.owner = THIS_MODULE,
},
.probe = pxa2xx_flash_probe,
.remove = __devexit_p(pxa2xx_flash_remove),
.shutdown = pxa2xx_flash_shutdown,
};
static int __init init_pxa2xx_flash(void)
{
return platform_driver_register(&pxa2xx_flash_driver);
}
static void __exit cleanup_pxa2xx_flash(void)
{
platform_driver_unregister(&pxa2xx_flash_driver);
}
module_init(init_pxa2xx_flash);
module_exit(cleanup_pxa2xx_flash);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
MODULE_DESCRIPTION("MTD map driver for Intel XScale PXA2xx");
| gpl-2.0 |
hillbeast/Kyorakernel-G3 | drivers/platform/x86/compal-laptop.c | 724 | 8886 | /*-*-linux-c-*-*/
/*
Copyright (C) 2008 Cezary Jackiewicz <cezary.jackiewicz (at) gmail.com>
based on MSI driver
Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
*/
/*
* compal-laptop.c - Compal laptop support.
*
* This driver exports a few files in /sys/devices/platform/compal-laptop/:
*
* wlan - wlan subsystem state: contains 0 or 1 (rw)
*
* bluetooth - Bluetooth subsystem state: contains 0 or 1 (rw)
*
* raw - raw value taken from embedded controller register (ro)
*
* In addition to these platform device attributes the driver
* registers itself in the Linux backlight control subsystem and is
* available to userspace under /sys/class/backlight/compal-laptop/.
*
* This driver might work on other laptops produced by Compal. If you
* want to try it you can pass force=1 as argument to the module which
* will force it to load even when the DMI data doesn't identify the
* laptop as FL9x.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
#include <linux/autoconf.h>
#define COMPAL_DRIVER_VERSION "0.2.6"
#define COMPAL_LCD_LEVEL_MAX 8
#define COMPAL_EC_COMMAND_WIRELESS 0xBB
#define COMPAL_EC_COMMAND_LCD_LEVEL 0xB9
#define KILLSWITCH_MASK 0x10
#define WLAN_MASK 0x01
#define BT_MASK 0x02
static int force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
/* Hardware access */
static int set_lcd_level(int level)
{
if (level < 0 || level >= COMPAL_LCD_LEVEL_MAX)
return -EINVAL;
ec_write(COMPAL_EC_COMMAND_LCD_LEVEL, level);
return 0;
}
static int get_lcd_level(void)
{
u8 result;
ec_read(COMPAL_EC_COMMAND_LCD_LEVEL, &result);
return (int) result;
}
static int set_wlan_state(int state)
{
u8 result, value;
ec_read(COMPAL_EC_COMMAND_WIRELESS, &result);
if ((result & KILLSWITCH_MASK) == 0)
return -EINVAL;
else {
if (state)
value = (u8) (result | WLAN_MASK);
else
value = (u8) (result & ~WLAN_MASK);
ec_write(COMPAL_EC_COMMAND_WIRELESS, value);
}
return 0;
}
static int set_bluetooth_state(int state)
{
u8 result, value;
ec_read(COMPAL_EC_COMMAND_WIRELESS, &result);
if ((result & KILLSWITCH_MASK) == 0)
return -EINVAL;
else {
if (state)
value = (u8) (result | BT_MASK);
else
value = (u8) (result & ~BT_MASK);
ec_write(COMPAL_EC_COMMAND_WIRELESS, value);
}
return 0;
}
static int get_wireless_state(int *wlan, int *bluetooth)
{
u8 result;
ec_read(COMPAL_EC_COMMAND_WIRELESS, &result);
if (wlan) {
if ((result & KILLSWITCH_MASK) == 0)
*wlan = 0;
else
*wlan = result & WLAN_MASK;
}
if (bluetooth) {
if ((result & KILLSWITCH_MASK) == 0)
*bluetooth = 0;
else
*bluetooth = (result & BT_MASK) >> 1;
}
return 0;
}
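/*
 * Illustrative sketch, not part of the original driver: the wireless state
 * register decoded above packs everything into one EC byte - bit 4 is the
 * hardware kill switch, bit 0 the WLAN enable and bit 1 the Bluetooth
 * enable, so both radios read back as off while the kill switch is cleared
 * (kept under #if 0, documentation only).
 */
#if 0
static void example_decode_wireless_byte(u8 result)
{
	bool killswitch_on = result & KILLSWITCH_MASK;
	bool wlan_on = killswitch_on && (result & WLAN_MASK);
	bool bt_on = killswitch_on && (result & BT_MASK);

	pr_debug("killswitch=%d wlan=%d bt=%d\n",
		 killswitch_on, wlan_on, bt_on);
}
#endif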
/* Backlight device stuff */
static int bl_get_brightness(struct backlight_device *b)
{
return get_lcd_level();
}
static int bl_update_status(struct backlight_device *b)
{
return set_lcd_level(b->props.brightness);
}
static struct backlight_ops compalbl_ops = {
.get_brightness = bl_get_brightness,
.update_status = bl_update_status,
};
static struct backlight_device *compalbl_device;
/* Platform device */
static ssize_t show_wlan(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret, enabled;
ret = get_wireless_state(&enabled, NULL);
if (ret < 0)
return ret;
return sprintf(buf, "%i\n", enabled);
}
static ssize_t show_raw(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 result;
ec_read(COMPAL_EC_COMMAND_WIRELESS, &result);
return sprintf(buf, "%i\n", result);
}
static ssize_t show_bluetooth(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret, enabled;
ret = get_wireless_state(NULL, &enabled);
if (ret < 0)
return ret;
return sprintf(buf, "%i\n", enabled);
}
static ssize_t store_wlan_state(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int state, ret;
if (sscanf(buf, "%i", &state) != 1 || (state < 0 || state > 1))
return -EINVAL;
ret = set_wlan_state(state);
if (ret < 0)
return ret;
return count;
}
static ssize_t store_bluetooth_state(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int state, ret;
if (sscanf(buf, "%i", &state) != 1 || (state < 0 || state > 1))
return -EINVAL;
ret = set_bluetooth_state(state);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR(bluetooth, 0644, show_bluetooth, store_bluetooth_state);
static DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan_state);
static DEVICE_ATTR(raw, 0444, show_raw, NULL);
static struct attribute *compal_attributes[] = {
&dev_attr_bluetooth.attr,
&dev_attr_wlan.attr,
&dev_attr_raw.attr,
NULL
};
static struct attribute_group compal_attribute_group = {
.attrs = compal_attributes
};
static struct platform_driver compal_driver = {
.driver = {
.name = "compal-laptop",
.owner = THIS_MODULE,
}
};
static struct platform_device *compal_device;
/* Initialization */
static int dmi_check_cb(const struct dmi_system_id *id)
{
printk(KERN_INFO "compal-laptop: Identified laptop model '%s'.\n",
id->ident);
return 0;
}
static struct dmi_system_id __initdata compal_dmi_table[] = {
{
.ident = "FL90/IFL90",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "IFL90"),
DMI_MATCH(DMI_BOARD_VERSION, "IFT00"),
},
.callback = dmi_check_cb
},
{
.ident = "FL90/IFL90",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "IFL90"),
DMI_MATCH(DMI_BOARD_VERSION, "REFERENCE"),
},
.callback = dmi_check_cb
},
{
.ident = "FL91/IFL91",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "IFL91"),
DMI_MATCH(DMI_BOARD_VERSION, "IFT00"),
},
.callback = dmi_check_cb
},
{
.ident = "FL92/JFL92",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "JFL92"),
DMI_MATCH(DMI_BOARD_VERSION, "IFT00"),
},
.callback = dmi_check_cb
},
{
.ident = "FT00/IFT00",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "IFT00"),
DMI_MATCH(DMI_BOARD_VERSION, "IFT00"),
},
.callback = dmi_check_cb
},
{ }
};
static int __init compal_init(void)
{
int ret;
if (acpi_disabled)
return -ENODEV;
if (!force && !dmi_check_system(compal_dmi_table))
return -ENODEV;
/* Register backlight stuff */
if (!acpi_video_backlight_support()) {
compalbl_device = backlight_device_register("compal-laptop", NULL, NULL,
&compalbl_ops);
if (IS_ERR(compalbl_device))
return PTR_ERR(compalbl_device);
compalbl_device->props.max_brightness = COMPAL_LCD_LEVEL_MAX-1;
}
ret = platform_driver_register(&compal_driver);
if (ret)
goto fail_backlight;
/* Register platform stuff */
compal_device = platform_device_alloc("compal-laptop", -1);
if (!compal_device) {
ret = -ENOMEM;
goto fail_platform_driver;
}
ret = platform_device_add(compal_device);
if (ret)
goto fail_platform_device1;
ret = sysfs_create_group(&compal_device->dev.kobj,
&compal_attribute_group);
if (ret)
goto fail_platform_device2;
printk(KERN_INFO "compal-laptop: driver "COMPAL_DRIVER_VERSION
" successfully loaded.\n");
return 0;
fail_platform_device2:
platform_device_del(compal_device);
fail_platform_device1:
platform_device_put(compal_device);
fail_platform_driver:
platform_driver_unregister(&compal_driver);
fail_backlight:
backlight_device_unregister(compalbl_device);
return ret;
}
static void __exit compal_cleanup(void)
{
sysfs_remove_group(&compal_device->dev.kobj, &compal_attribute_group);
platform_device_unregister(compal_device);
platform_driver_unregister(&compal_driver);
backlight_device_unregister(compalbl_device);
printk(KERN_INFO "compal-laptop: driver unloaded.\n");
}
module_init(compal_init);
module_exit(compal_cleanup);
MODULE_AUTHOR("Cezary Jackiewicz");
MODULE_DESCRIPTION("Compal Laptop Support");
MODULE_VERSION(COMPAL_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("dmi:*:rnIFL90:rvrIFT00:*");
MODULE_ALIAS("dmi:*:rnIFL90:rvrREFERENCE:*");
MODULE_ALIAS("dmi:*:rnIFL91:rvrIFT00:*");
MODULE_ALIAS("dmi:*:rnJFL92:rvrIFT00:*");
MODULE_ALIAS("dmi:*:rnIFT00:rvrIFT00:*");
| gpl-2.0 |
Meninblack007/android_kernel_cyanogen_msm8916 | drivers/usb/serial/zte_ev.c | 980 | 8820 | /*
* ZTE_EV USB serial driver
*
* Copyright (C) 2012 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
* Copyright (C) 2012 Linux Foundation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This driver is based on code found in a ZTE_ENV patch that modified
* the usb-serial generic driver. Comments were left in that I think
* show the commands used to talk to the device, but I am not sure.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#define MAX_SETUP_DATA_SIZE 32
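/*
 * Annotation (not in the original source): the control transfers issued in
 * open/close below look like standard CDC ACM class requests, i.e.
 * bRequest 0x20 (SET_LINE_CODING), 0x21 (GET_LINE_CODING) and
 * 0x22 (SET_CONTROL_LINE_STATE). The 7-byte payloads appear to be CDC
 * line-coding blocks: 80 25 00 00 00 00 08 would decode as dwDTERate
 * 0x2580 = 9600 baud with 8 data bits, and 00 c2 01 00 00 00 08 as
 * 0x1c200 = 115200 baud with 8 data bits. This is an observation only;
 * the original author left the raw bus traces as comments.
 */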
static void debug_data(struct device *dev, const char *function, int len,
const unsigned char *data, int result)
{
dev_dbg(dev, "result = %d\n", result);
if (result == len)
dev_dbg(dev, "%s - length = %d, data = %*ph\n", function,
len, len, data);
}
static int zte_ev_usb_serial_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
struct usb_device *udev = port->serial->dev;
struct device *dev = &port->dev;
int result = 0;
int len;
unsigned char *buf;
buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* send 1st ctl cmd(CTL 21 22 01 00 00 00 00 00) */
len = 0;
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0001, 0x0000, NULL, len,
USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 2nd cmd and receive data */
/*
* 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 25.1.0(5)
* 16.0 DI 00 96 00 00 00 00 08
*/
len = 0x0007;
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x21, 0xa1,
0x0000, 0x0000, buf, len,
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 3 cmd */
/*
* 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 30.1.0
* 16.0 DO 80 25 00 00 00 00 08 .%..... 30.2.0
*/
len = 0x0007;
buf[0] = 0x80;
buf[1] = 0x25;
buf[2] = 0x00;
buf[3] = 0x00;
buf[4] = 0x00;
buf[5] = 0x00;
buf[6] = 0x08;
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x20, 0x21,
0x0000, 0x0000, buf, len,
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 4 cmd */
/*
* 16.0 CTL 21 22 03 00 00 00 00 00
*/
len = 0;
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0003, 0x0000, NULL, len,
USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 5 cmd */
/*
* 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 33.1.0
* 16.0 DI 80 25 00 00 00 00 08
*/
len = 0x0007;
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x21, 0xa1,
0x0000, 0x0000, buf, len,
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 6 cmd */
/*
* 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 34.1.0
* 16.0 DO 80 25 00 00 00 00 08
*/
len = 0x0007;
buf[0] = 0x80;
buf[1] = 0x25;
buf[2] = 0x00;
buf[3] = 0x00;
buf[4] = 0x00;
buf[5] = 0x00;
buf[6] = 0x08;
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x20, 0x21,
0x0000, 0x0000, buf, len,
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
kfree(buf);
return usb_serial_generic_open(tty, port);
}
/*
* CTL 21 22 02 00 00 00 00 00 CLASS 338.1.0
*
* 16.1 DI a1 20 00 00 00 00 02 00 02 00 . ........ 340.1.0
* 16.0 CTL 21 22 03 00 00 00 00 00 CLASS 341.1.0
*
* 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 346.1.0(3)
* 16.0 DI 00 08 07 00 00 00 08 ....... 346.2.0
*
* 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 349.1.0
* 16.0 DO 00 c2 01 00 00 00 08 ....... 349.2.0
*
* 16.0 CTL 21 22 03 00 00 00 00 00 CLASS 350.1.0(2)
*
* 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 352.1.0
* 16.0 DI 00 c2 01 00 00 00 08 ....... 352.2.0
*
* 16.1 DI a1 20 00 00 00 00 02 00 02 00 . ........ 353.1.0
*
* 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 354.1.0
* 16.0 DO 00 c2 01 00 00 00 08 ....... 354.2.0
*
* 16.0 CTL 21 22 03 00 00 00 00 00
*/
static void zte_ev_usb_serial_close(struct usb_serial_port *port)
{
struct usb_device *udev = port->serial->dev;
struct device *dev = &port->dev;
int result = 0;
int len;
unsigned char *buf;
buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL);
if (!buf)
return;
/* send 1st ctl cmd(CTL 21 22 02 00 00 00 00 00) */
len = 0;
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0002, 0x0000, NULL, len,
USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 2nd ctl cmd(CTL 21 22 03 00 00 00 00 00) */
len = 0;
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0003, 0x0000, NULL, len,
USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 3rd cmd and receive data */
/*
* 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 25.1.0(5)
* 16.0 DI 00 08 07 00 00 00 08
*/
len = 0x0007;
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x21, 0xa1,
0x0000, 0x0000, buf, len,
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 4 cmd */
/*
* 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 30.1.0
* 16.0 DO 00 c2 01 00 00 00 08 .%..... 30.2.0
*/
len = 0x0007;
buf[0] = 0x00;
buf[1] = 0xc2;
buf[2] = 0x01;
buf[3] = 0x00;
buf[4] = 0x00;
buf[5] = 0x00;
buf[6] = 0x08;
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x20, 0x21,
0x0000, 0x0000, buf, len,
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 5 cmd */
/*
* 16.0 CTL 21 22 03 00 00 00 00 00
*/
len = 0;
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0003, 0x0000, NULL, len,
USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
/* send 6 cmd */
/*
* 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 33.1.0
* 16.0 DI 00 c2 01 00 00 00 08
*/
len = 0x0007;
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x21, 0xa1,
0x0000, 0x0000, buf, len,
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 7 cmd */
/*
* 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 354.1.0
* 16.0 DO 00 c2 01 00 00 00 08 ....... 354.2.0
*/
len = 0x0007;
buf[0] = 0x00;
buf[1] = 0xc2;
buf[2] = 0x01;
buf[3] = 0x00;
buf[4] = 0x00;
buf[5] = 0x00;
buf[6] = 0x08;
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x20, 0x21,
0x0000, 0x0000, buf, len,
USB_CTRL_GET_TIMEOUT);
debug_data(dev, __func__, len, buf, result);
/* send 8 cmd */
/*
* 16.0 CTL 21 22 03 00 00 00 00 00
*/
len = 0;
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x22, 0x21,
0x0003, 0x0000, NULL, len,
USB_CTRL_GET_TIMEOUT);
dev_dbg(dev, "result = %d\n", result);
kfree(buf);
usb_serial_generic_close(port);
}
static const struct usb_device_id id_table[] = {
/* AC8710, AC8710T */
{ USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) },
/* AC8700 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) },
/* MG880 */
{ USB_DEVICE(0x19d2, 0xfffd) },
{ USB_DEVICE(0x19d2, 0xfffc) },
{ USB_DEVICE(0x19d2, 0xfffb) },
/* AC8710_V3 */
{ USB_DEVICE(0x19d2, 0xfff6) },
{ USB_DEVICE(0x19d2, 0xfff7) },
{ USB_DEVICE(0x19d2, 0xfff8) },
{ USB_DEVICE(0x19d2, 0xfff9) },
{ USB_DEVICE(0x19d2, 0xffee) },
/* AC2716, MC2716 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) },
/* AD3812 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) },
{ USB_DEVICE(0x19d2, 0xffec) },
{ USB_DEVICE(0x05C6, 0x3197) },
{ USB_DEVICE(0x05C6, 0x6000) },
{ USB_DEVICE(0x05C6, 0x9008) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver zio_device = {
.driver = {
.owner = THIS_MODULE,
.name = "zte_ev",
},
.id_table = id_table,
.num_ports = 1,
.open = zte_ev_usb_serial_open,
.close = zte_ev_usb_serial_close,
};
static struct usb_serial_driver * const serial_drivers[] = {
&zio_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
supercairos/android_kernel_doro_msm8916 | net/ipv4/tcp_cubic.c | 1748 | 14357 | /*
* TCP CUBIC: Binary Increase Congestion control for TCP v2.3
* Home page:
* http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
* This is from the implementation of CUBIC TCP in
* Sangtae Ha, Injong Rhee and Lisong Xu,
* "CUBIC: A New TCP-Friendly High-Speed TCP Variant"
* in ACM SIGOPS Operating System Review, July 2008.
* Available from:
* http://netsrv.csc.ncsu.edu/export/cubic_a_new_tcp_2008.pdf
*
* CUBIC integrates a new slow start algorithm, called HyStart.
* The details of HyStart are presented in
* Sangtae Ha and Injong Rhee,
* "Taming the Elephants: New TCP Slow Start", NCSU TechReport 2008.
* Available from:
* http://netsrv.csc.ncsu.edu/export/hystart_techreport_2008.pdf
*
* All testing results are available from:
* http://netsrv.csc.ncsu.edu/wiki/index.php/TCP_Testing
*
* Unless CUBIC is enabled and congestion window is large
* this behaves the same as the original Reno.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <net/tcp.h>
#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation
* max_cwnd = snd_cwnd * beta
*/
#define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */
/* Two methods of hybrid slow start */
#define HYSTART_ACK_TRAIN 0x1
#define HYSTART_DELAY 0x2
/* Number of delay samples for detecting the increase of delay */
#define HYSTART_MIN_SAMPLES 8
#define HYSTART_DELAY_MIN (4U<<3)
#define HYSTART_DELAY_MAX (16U<<3)
#define HYSTART_DELAY_THRESH(x) clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
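/*
 * Added note: HyStart exits slow start early using either of two signals,
 * selected by hystart_detect. HYSTART_ACK_TRAIN fires when an ACK train in
 * the current round spans a significant fraction of the measured minimum
 * RTT; HYSTART_DELAY fires when the per-round minimum RTT rises above
 * delay_min plus HYSTART_DELAY_THRESH(). See hystart_update() below.
 */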
static int fast_convergence __read_mostly = 1;
static int beta __read_mostly = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh __read_mostly;
static int bic_scale __read_mostly = 41;
static int tcp_friendliness __read_mostly = 1;
static int hystart __read_mostly = 1;
static int hystart_detect __read_mostly = HYSTART_ACK_TRAIN | HYSTART_DELAY;
static int hystart_low_window __read_mostly = 16;
static int hystart_ack_delta __read_mostly = 2;
static u32 cube_rtt_scale __read_mostly;
static u32 beta_scale __read_mostly;
static u64 cube_factor __read_mostly;
/* Note parameters that are used for precomputing scale factors are read-only */
module_param(fast_convergence, int, 0644);
MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "beta for multiplicative increase");
module_param(initial_ssthresh, int, 0644);
MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
module_param(bic_scale, int, 0444);
MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_scale/1024)");
module_param(tcp_friendliness, int, 0644);
MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness");
module_param(hystart, int, 0644);
MODULE_PARM_DESC(hystart, "turn on/off hybrid slow start algorithm");
module_param(hystart_detect, int, 0644);
MODULE_PARM_DESC(hystart_detect, "hybrid slow start detection mechanisms"
" 1: packet-train 2: delay 3: both packet-train and delay");
module_param(hystart_low_window, int, 0644);
MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
module_param(hystart_ack_delta, int, 0644);
MODULE_PARM_DESC(hystart_ack_delta, "spacing between ack's indicating train (msecs)");
/* BIC TCP Parameters */
struct bictcp {
u32 cnt; /* increase cwnd by 1 after ACKs */
u32 last_max_cwnd; /* last maximum snd_cwnd */
u32 loss_cwnd; /* congestion window at last loss */
u32 last_cwnd; /* the last snd_cwnd */
u32 last_time; /* time when updated last_cwnd */
u32 bic_origin_point;/* origin point of bic function */
u32 bic_K; /* time to origin point from the beginning of the current epoch */
u32 delay_min; /* min delay (msec << 3) */
u32 epoch_start; /* beginning of an epoch */
u32 ack_cnt; /* number of acks */
u32 tcp_cwnd; /* estimated tcp cwnd */
#define ACK_RATIO_SHIFT 4
#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)
u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */
u8 sample_cnt; /* number of samples to decide curr_rtt */
u8 found; /* the exit point is found? */
u32 round_start; /* beginning of each round */
u32 end_seq; /* end_seq of the round */
u32 last_ack; /* last time when the ACK spacing is close */
u32 curr_rtt; /* the minimum rtt of current round */
};
static inline void bictcp_reset(struct bictcp *ca)
{
ca->cnt = 0;
ca->last_max_cwnd = 0;
ca->last_cwnd = 0;
ca->last_time = 0;
ca->bic_origin_point = 0;
ca->bic_K = 0;
ca->delay_min = 0;
ca->epoch_start = 0;
ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
ca->ack_cnt = 0;
ca->tcp_cwnd = 0;
ca->found = 0;
}
static inline u32 bictcp_clock(void)
{
#if HZ < 1000
return ktime_to_ms(ktime_get_real());
#else
return jiffies_to_msecs(jiffies);
#endif
}
static inline void bictcp_hystart_reset(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
ca->round_start = ca->last_ack = bictcp_clock();
ca->end_seq = tp->snd_nxt;
ca->curr_rtt = 0;
ca->sample_cnt = 0;
}
static void bictcp_init(struct sock *sk)
{
struct bictcp *ca = inet_csk_ca(sk);
bictcp_reset(ca);
ca->loss_cwnd = 0;
if (hystart)
bictcp_hystart_reset(sk);
if (!hystart && initial_ssthresh)
tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}
/* calculate the cubic root of x using a table lookup followed by one
* Newton-Raphson iteration.
* Avg err ~= 0.195%
*/
static u32 cubic_root(u64 a)
{
u32 x, b, shift;
/*
* cbrt(x) MSB values for x MSB values in [0..63].
* Precomputed then refined by hand - Willy Tarreau
*
* For x in [0..63],
* v = cbrt(x << 18) - 1
* cbrt(x) = (v[x] + 10) >> 6
*/
static const u8 v[] = {
/* 0x00 */ 0, 54, 54, 54, 118, 118, 118, 118,
/* 0x08 */ 123, 129, 134, 138, 143, 147, 151, 156,
/* 0x10 */ 157, 161, 164, 168, 170, 173, 176, 179,
/* 0x18 */ 181, 185, 187, 190, 192, 194, 197, 199,
/* 0x20 */ 200, 202, 204, 206, 209, 211, 213, 215,
/* 0x28 */ 217, 219, 221, 222, 224, 225, 227, 229,
/* 0x30 */ 231, 232, 234, 236, 237, 239, 240, 242,
/* 0x38 */ 244, 245, 246, 248, 250, 251, 252, 254,
};
b = fls64(a);
if (b < 7) {
/* a in [0..63] */
return ((u32)v[(u32)a] + 35) >> 6;
}
b = ((b * 84) >> 8) - 1;
shift = (a >> (b * 3));
x = ((u32)(((u32)v[shift] + 10) << b)) >> 6;
/*
* Newton-Raphson iteration
* 2
* x = ( 2 * x + a / x ) / 3
* k+1 k k
*/
x = (2 * x + (u32)div64_u64(a, (u64)x * (u64)(x - 1)));
x = ((x * 341) >> 10);
return x;
}
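/*
 * Worked example (added, computed by hand, not from the original source):
 * cubic_root(1 << 18) takes the b >= 7 path with b = 5 and shift = 8,
 * giving the initial estimate x = 66; one Newton-Raphson step refines this
 * to exactly 64, and indeed 64^3 = 262144 = 1 << 18.
 */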
/*
* Compute congestion window to use.
*/
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
u32 delta, bic_target, max_cnt;
u64 offs, t;
ca->ack_cnt++; /* count the number of ACKs */
if (ca->last_cwnd == cwnd &&
(s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
return;
ca->last_cwnd = cwnd;
ca->last_time = tcp_time_stamp;
if (ca->epoch_start == 0) {
ca->epoch_start = tcp_time_stamp; /* record the beginning of an epoch */
ca->ack_cnt = 1; /* start counting */
ca->tcp_cwnd = cwnd; /* sync with cubic */
if (ca->last_max_cwnd <= cwnd) {
ca->bic_K = 0;
ca->bic_origin_point = cwnd;
} else {
/* Compute new K based on
* (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
*/
ca->bic_K = cubic_root(cube_factor
* (ca->last_max_cwnd - cwnd));
ca->bic_origin_point = ca->last_max_cwnd;
}
}
/* cubic function - calc*/
/* calculate c * time^3 / rtt,
* while considering overflow in calculation of time^3
* (so time^3 is done by using 64 bit)
* and without the support of division of 64bit numbers
* (so all divisions are done by using 32 bit)
* also NOTE the unit of those variables
* time = (t - K) / 2^bictcp_HZ
* c = bic_scale >> 10
* rtt = (srtt >> 3) / HZ
* !!! The following code does not have overflow problems,
* if the cwnd < 1 million packets !!!
*/
t = (s32)(tcp_time_stamp - ca->epoch_start);
t += msecs_to_jiffies(ca->delay_min >> 3);
/* change the unit from HZ to bictcp_HZ */
t <<= BICTCP_HZ;
do_div(t, HZ);
if (t < ca->bic_K) /* t - K */
offs = ca->bic_K - t;
else
offs = t - ca->bic_K;
/* c/rtt * (t-K)^3 */
delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
if (t < ca->bic_K) /* below origin*/
bic_target = ca->bic_origin_point - delta;
else /* above origin*/
bic_target = ca->bic_origin_point + delta;
/* cubic function - calc bictcp_cnt*/
if (bic_target > cwnd) {
ca->cnt = cwnd / (bic_target - cwnd);
} else {
ca->cnt = 100 * cwnd; /* very small increment*/
}
/*
* The initial growth of cubic function may be too conservative
* when the available bandwidth is still unknown.
*/
if (ca->last_max_cwnd == 0 && ca->cnt > 20)
ca->cnt = 20; /* increase cwnd 5% per RTT */
/* TCP Friendly */
if (tcp_friendliness) {
u32 scale = beta_scale;
delta = (cwnd * scale) >> 3;
while (ca->ack_cnt > delta) { /* update tcp cwnd */
ca->ack_cnt -= delta;
ca->tcp_cwnd++;
}
if (ca->tcp_cwnd > cwnd){ /* if bic is slower than tcp */
delta = ca->tcp_cwnd - cwnd;
max_cnt = cwnd / delta;
if (ca->cnt > max_cnt)
ca->cnt = max_cnt;
}
}
ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
if (ca->cnt == 0) /* cannot be zero */
ca->cnt = 1;
}
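/*
 * Added summary: the fixed-point arithmetic above evaluates the CUBIC window
 * curve W(t) = C * (t - K)^3 + Wmax, where Wmax is bic_origin_point, K is
 * bic_K (in 2^-BICTCP_HZ second units) and C is bic_scale/1024 folded into
 * cube_rtt_scale together with the 100ms reference RTT; ca->cnt is then set
 * so that cwnd approaches bic_target by roughly one segment per cnt ACKs.
 */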
static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
if (!tcp_is_cwnd_limited(sk, in_flight))
return;
if (tp->snd_cwnd <= tp->snd_ssthresh) {
if (hystart && after(ack, ca->end_seq))
bictcp_hystart_reset(sk);
tcp_slow_start(tp);
} else {
bictcp_update(ca, tp->snd_cwnd);
tcp_cong_avoid_ai(tp, ca->cnt);
}
}
static u32 bictcp_recalc_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
ca->epoch_start = 0; /* end of epoch */
/* Wmax and fast convergence */
if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
/ (2 * BICTCP_BETA_SCALE);
else
ca->last_max_cwnd = tp->snd_cwnd;
ca->loss_cwnd = tp->snd_cwnd;
return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}
static u32 bictcp_undo_cwnd(struct sock *sk)
{
struct bictcp *ca = inet_csk_ca(sk);
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}
static void bictcp_state(struct sock *sk, u8 new_state)
{
if (new_state == TCP_CA_Loss) {
bictcp_reset(inet_csk_ca(sk));
bictcp_hystart_reset(sk);
}
}
static void hystart_update(struct sock *sk, u32 delay)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
if (!(ca->found & hystart_detect)) {
u32 now = bictcp_clock();
/* first detection parameter - ack-train detection */
if ((s32)(now - ca->last_ack) <= hystart_ack_delta) {
ca->last_ack = now;
if ((s32)(now - ca->round_start) > ca->delay_min >> 4)
ca->found |= HYSTART_ACK_TRAIN;
}
/* obtain the minimum delay of more than sampling packets */
if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
if (ca->curr_rtt == 0 || ca->curr_rtt > delay)
ca->curr_rtt = delay;
ca->sample_cnt++;
} else {
if (ca->curr_rtt > ca->delay_min +
HYSTART_DELAY_THRESH(ca->delay_min>>4))
ca->found |= HYSTART_DELAY;
}
/*
 * Once either of the two conditions is met,
 * we exit slow start immediately.
 */
if (ca->found & hystart_detect)
tp->snd_ssthresh = tp->snd_cwnd;
}
}
/* Track delayed acknowledgment ratio using sliding window
* ratio = (15*ratio + sample) / 16
*/
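/*
 * Added worked example: with ACK_RATIO_SHIFT = 4 the ratio is kept scaled
 * by 16 and starts at 2 << 4 = 32, i.e. roughly two packets per ACK. If
 * every ACK keeps covering two segments (cnt = 2), the update
 * ratio = ratio - ratio/16 + 2 stays at 32, and bictcp_update() then scales
 * ca->cnt by 16/32, halving it to compensate for delayed ACKs.
 */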
static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
u32 delay;
if (icsk->icsk_ca_state == TCP_CA_Open) {
u32 ratio = ca->delayed_ack;
ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
ratio += cnt;
ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
}
/* Some calls are for duplicates without timestamps */
if (rtt_us < 0)
return;
/* Discard delay samples right after fast recovery */
if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
return;
delay = (rtt_us << 3) / USEC_PER_MSEC;
if (delay == 0)
delay = 1;
/* first time call or link delay decreases */
if (ca->delay_min == 0 || ca->delay_min > delay)
ca->delay_min = delay;
/* hystart triggers when cwnd is larger than some threshold */
if (hystart && tp->snd_cwnd <= tp->snd_ssthresh &&
tp->snd_cwnd >= hystart_low_window)
hystart_update(sk, delay);
}
static struct tcp_congestion_ops cubictcp __read_mostly = {
.init = bictcp_init,
.ssthresh = bictcp_recalc_ssthresh,
.cong_avoid = bictcp_cong_avoid,
.set_state = bictcp_state,
.undo_cwnd = bictcp_undo_cwnd,
.pkts_acked = bictcp_acked,
.owner = THIS_MODULE,
.name = "cubic",
};
static int __init cubictcp_register(void)
{
BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);
/* Precompute a bunch of the scaling factors that are used per-packet
* based on SRTT of 100ms
*/
beta_scale = 8*(BICTCP_BETA_SCALE+beta)/ 3 / (BICTCP_BETA_SCALE - beta);
cube_rtt_scale = (bic_scale * 10); /* 1024*c/rtt */
/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
* so K = cubic_root( (wmax-cwnd)*rtt/c )
* the unit of K is bictcp_HZ=2^10, not HZ
*
* c = bic_scale >> 10
* rtt = 100ms
*
* the following code has been designed and tested for
* cwnd < 1 million packets
* RTT < 100 seconds
* HZ < 1,000,00 (corresponding to 10 nano-second)
*/
/* 1/c * 2^2*bictcp_HZ * srtt */
cube_factor = 1ull << (10+3*BICTCP_HZ); /* 2^40 */
/* divide by bic_scale and by constant Srtt (100ms) */
do_div(cube_factor, bic_scale * 10);
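/*
 * Added note (illustrative arithmetic only): with the default module
 * parameters (beta = 717, bic_scale = 41) these precomputations give
 * beta_scale = 15, cube_rtt_scale = 410 and cube_factor = 2^40 / 410,
 * roughly 2.68e9.
 */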
/* hystart needs ms clock resolution */
if (hystart && HZ < 1000)
cubictcp.flags |= TCP_CONG_RTT_STAMP;
return tcp_register_congestion_control(&cubictcp);
}
static void __exit cubictcp_unregister(void)
{
tcp_unregister_congestion_control(&cubictcp);
}
module_init(cubictcp_register);
module_exit(cubictcp_unregister);
MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CUBIC TCP");
MODULE_VERSION("2.3");
| gpl-2.0 |
TeamOrion-Devices/kernel_asus_grouper | drivers/staging/intel_sst/intelmid.c | 2772 | 29601 | /*
* intelmid.c - Intel Sound card driver for MID
*
* Copyright (C) 2008-10 Intel Corp
* Authors: Harsha Priya <priya.harsha@intel.com>
* Vinod Koul <vinod.koul@intel.com>
* Dharageswari R <dharageswari.r@intel.com>
* KP Jeeja <jeeja.kp@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* ALSA driver for Intel MID sound card chipset
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/input.h>
#include <sound/control.h>
#include <asm/mrst.h>
#include <sound/pcm.h>
#include <sound/jack.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <linux/gpio.h>
#include "intel_sst.h"
#include "intel_sst_ioctl.h"
#include "intel_sst_fw_ipc.h"
#include "intel_sst_common.h"
#include "intelmid_snd_control.h"
#include "intelmid_adc_control.h"
#include "intelmid.h"
MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>");
MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>");
MODULE_DESCRIPTION("Intel MAD Sound card driver");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{Intel,Intel_MAD}");
static int card_index = SNDRV_DEFAULT_IDX1;/* Index 0-MAX */
static char *card_id = SNDRV_DEFAULT_STR1; /* ID for this card */
module_param(card_index, int, 0444);
MODULE_PARM_DESC(card_index, "Index value for INTELMAD soundcard.");
module_param(card_id, charp, 0444);
MODULE_PARM_DESC(card_id, "ID string for INTELMAD soundcard.");
int sst_card_vendor_id;
int intelmid_audio_interrupt_enable;/*checkpatch fix*/
struct snd_intelmad *intelmad_drv;
#define INFO(_cpu_id, _irq_cache, _size) \
((kernel_ulong_t)&(struct snd_intelmad_probe_info) { \
.cpu_id = (_cpu_id), \
.irq_cache = (_irq_cache), \
.size = (_size), \
})
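/*
 * Added note: the INFO() macro above stores the address of an anonymous
 * struct snd_intelmad_probe_info (a compound literal) in the
 * platform_device_id driver_data field; snd_intelmad_probe() later recovers
 * it through platform_get_device_id(pdev)->driver_data, as used by the
 * snd_intelmad_ids table near the end of this file.
 */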
/* Data path functionalities */
static struct snd_pcm_hardware snd_intelmad_stream = {
.info = (SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_DOUBLE |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_MMAP|
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_SYNC_START),
.formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |
SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 |
SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32),
.rates = (SNDRV_PCM_RATE_8000|
SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000),
.rate_min = MIN_RATE,
.rate_max = MAX_RATE,
.channels_min = MIN_CHANNEL,
.channels_max = MAX_CHANNEL_AMIC,
.buffer_bytes_max = MAX_BUFFER,
.period_bytes_min = MIN_PERIOD_BYTES,
.period_bytes_max = MAX_PERIOD_BYTES,
.periods_min = MIN_PERIODS,
.periods_max = MAX_PERIODS,
.fifo_size = FIFO_SIZE,
};
/**
* snd_intelmad_pcm_trigger - stream activities are handled here
*
* @substream: substream for which the stream function is called
* @cmd: the stream command requested from the upper layer
*
* This function is called whenever a stream activity is invoked
*/
static int snd_intelmad_pcm_trigger(struct snd_pcm_substream *substream,
int cmd)
{
int ret_val = 0, str_id;
struct snd_intelmad *intelmaddata;
struct mad_stream_pvt *stream;
struct intel_sst_pcm_control *sst_ops;
WARN_ON(!substream);
intelmaddata = snd_pcm_substream_chip(substream);
stream = substream->runtime->private_data;
WARN_ON(!intelmaddata->sstdrv_ops);
WARN_ON(!intelmaddata->sstdrv_ops->scard_ops);
sst_ops = intelmaddata->sstdrv_ops->pcm_control;
str_id = stream->stream_info.str_id;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
pr_debug("Trigger Start\n");
ret_val = sst_ops->device_control(SST_SND_START, &str_id);
if (ret_val)
return ret_val;
stream->stream_status = RUNNING;
stream->substream = substream;
break;
case SNDRV_PCM_TRIGGER_STOP:
pr_debug("in stop\n");
ret_val = sst_ops->device_control(SST_SND_DROP, &str_id);
if (ret_val)
return ret_val;
stream->stream_status = DROPPED;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
pr_debug("in pause\n");
ret_val = sst_ops->device_control(SST_SND_PAUSE, &str_id);
if (ret_val)
return ret_val;
stream->stream_status = PAUSED;
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
pr_debug("in pause release\n");
ret_val = sst_ops->device_control(SST_SND_RESUME, &str_id);
if (ret_val)
return ret_val;
stream->stream_status = RUNNING;
break;
default:
return -EINVAL;
}
return ret_val;
}
/**
* snd_intelmad_pcm_prepare- internal preparation before starting a stream
*
* @substream: substream for which the function is called
*
* This function is called when a stream is started for internal preparation.
*/
static int snd_intelmad_pcm_prepare(struct snd_pcm_substream *substream)
{
struct mad_stream_pvt *stream;
int ret_val = 0;
struct snd_intelmad *intelmaddata;
pr_debug("pcm_prepare called\n");
WARN_ON(!substream);
stream = substream->runtime->private_data;
intelmaddata = snd_pcm_substream_chip(substream);
pr_debug("pb cnt = %d cap cnt = %d\n",\
intelmaddata->playback_cnt,
intelmaddata->capture_cnt);
if (stream->stream_info.str_id) {
pr_debug("Prepare called for already set stream\n");
ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
SST_SND_DROP, &stream->stream_info.str_id);
return ret_val;
}
ret_val = snd_intelmad_alloc_stream(substream);
if (ret_val < 0)
return ret_val;
stream->dbg_cum_bytes = 0;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
intelmaddata->playback_cnt++;
else
intelmaddata->capture_cnt++;
/* return back the stream id */
snprintf(substream->pcm->id, sizeof(substream->pcm->id),
"%d", stream->stream_info.str_id);
pr_debug("stream id to user = %s\n",
substream->pcm->id);
ret_val = snd_intelmad_init_stream(substream);
if (ret_val)
return ret_val;
substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER;
return ret_val;
}
static int snd_intelmad_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
int ret_val;
pr_debug("snd_intelmad_hw_params called\n");
ret_val = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
memset(substream->runtime->dma_area, 0,
params_buffer_bytes(hw_params));
return ret_val;
}
static int snd_intelmad_hw_free(struct snd_pcm_substream *substream)
{
pr_debug("snd_intelmad_hw_free called\n");
return snd_pcm_lib_free_pages(substream);
}
/**
* snd_intelmad_pcm_pointer- to report the current buffer pointer processed by hw
*
* @substream: substream for which the function is called
*
* This function is called by ALSA framework to get the current hw buffer ptr
* when a period is elapsed
*/
static snd_pcm_uframes_t snd_intelmad_pcm_pointer
(struct snd_pcm_substream *substream)
{
/* struct snd_pcm_runtime *runtime = substream->runtime; */
struct mad_stream_pvt *stream;
struct snd_intelmad *intelmaddata;
int ret_val;
WARN_ON(!substream);
intelmaddata = snd_pcm_substream_chip(substream);
stream = substream->runtime->private_data;
if (stream->stream_status == INIT)
return 0;
ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
SST_SND_BUFFER_POINTER, &stream->stream_info);
if (ret_val) {
pr_err("error code = 0x%x\n", ret_val);
return ret_val;
}
pr_debug("samples reported out 0x%llx\n",
stream->stream_info.buffer_ptr);
pr_debug("Frame bits:: %d period_count :: %d\n",
(int)substream->runtime->frame_bits,
(int)substream->runtime->period_size);
return stream->stream_info.buffer_ptr;
}
/**
* snd_intelmad_close- to free parameters when stream is stopped
*
* @substream: substream for which the function is called
*
* This function is called by ALSA framework when stream is stopped
*/
static int snd_intelmad_close(struct snd_pcm_substream *substream)
{
struct snd_intelmad *intelmaddata;
struct mad_stream_pvt *stream;
int ret_val = 0, str_id;
WARN_ON(!substream);
stream = substream->runtime->private_data;
str_id = stream->stream_info.str_id;
pr_debug("sst: snd_intelmad_close called for %d\n", str_id);
intelmaddata = snd_pcm_substream_chip(substream);
pr_debug("str id = %d\n", stream->stream_info.str_id);
if (stream->stream_info.str_id) {
/* SST API to actually stop/free the stream */
ret_val = intelmaddata->sstdrv_ops->pcm_control->close(str_id);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
intelmaddata->playback_cnt--;
else
intelmaddata->capture_cnt--;
}
pr_debug("snd_intelmad_close : pb cnt = %d cap cnt = %d\n",
intelmaddata->playback_cnt, intelmaddata->capture_cnt);
kfree(substream->runtime->private_data);
return ret_val;
}
/**
* snd_intelmad_open- to set runtime parameters during stream start
*
* @substream: substream for which the function is called
* @type: audio device type
*
* This function is called by ALSA framework when stream is started
*/
static int snd_intelmad_open(struct snd_pcm_substream *substream,
enum snd_sst_audio_device_type type)
{
struct snd_intelmad *intelmaddata;
struct snd_pcm_runtime *runtime;
struct mad_stream_pvt *stream;
WARN_ON(!substream);
pr_debug("snd_intelmad_open called\n");
intelmaddata = snd_pcm_substream_chip(substream);
runtime = substream->runtime;
/* set the runtime hw parameter with local snd_pcm_hardware struct */
runtime->hw = snd_intelmad_stream;
if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) {
/*
* MRST firmware currently denies stereo recording requests.
*/
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
runtime->hw.formats = (SNDRV_PCM_FMTBIT_S16 |
SNDRV_PCM_FMTBIT_U16);
runtime->hw.channels_max = 1;
}
}
if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
runtime->hw = snd_intelmad_stream;
runtime->hw.rates = SNDRV_PCM_RATE_48000;
runtime->hw.rate_min = MAX_RATE;
runtime->hw.formats = (SNDRV_PCM_FMTBIT_S24 |
SNDRV_PCM_FMTBIT_U24);
if (intelmaddata->sstdrv_ops->scard_ops->input_dev_id == AMIC)
runtime->hw.channels_max = MAX_CHANNEL_AMIC;
else
runtime->hw.channels_max = MAX_CHANNEL_DMIC;
}
/* set up the internal data structure stream pointers based on
whether this is a playback or a capture stream */
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
if (!stream)
return -ENOMEM;
stream->stream_info.str_id = 0;
stream->device = type;
stream->stream_status = INIT;
runtime->private_data = stream;
return snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
}
static int snd_intelmad_headset_open(struct snd_pcm_substream *substream)
{
return snd_intelmad_open(substream, SND_SST_DEVICE_HEADSET);
}
static int snd_intelmad_ihf_open(struct snd_pcm_substream *substream)
{
return snd_intelmad_open(substream, SND_SST_DEVICE_IHF);
}
static int snd_intelmad_vibra_open(struct snd_pcm_substream *substream)
{
return snd_intelmad_open(substream, SND_SST_DEVICE_VIBRA);
}
static int snd_intelmad_haptic_open(struct snd_pcm_substream *substream)
{
return snd_intelmad_open(substream, SND_SST_DEVICE_HAPTIC);
}
static struct snd_pcm_ops snd_intelmad_headset_ops = {
.open = snd_intelmad_headset_open,
.close = snd_intelmad_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_intelmad_hw_params,
.hw_free = snd_intelmad_hw_free,
.prepare = snd_intelmad_pcm_prepare,
.trigger = snd_intelmad_pcm_trigger,
.pointer = snd_intelmad_pcm_pointer,
};
static struct snd_pcm_ops snd_intelmad_ihf_ops = {
.open = snd_intelmad_ihf_open,
.close = snd_intelmad_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_intelmad_hw_params,
.hw_free = snd_intelmad_hw_free,
.prepare = snd_intelmad_pcm_prepare,
.trigger = snd_intelmad_pcm_trigger,
.pointer = snd_intelmad_pcm_pointer,
};
static struct snd_pcm_ops snd_intelmad_vibra_ops = {
.open = snd_intelmad_vibra_open,
.close = snd_intelmad_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_intelmad_hw_params,
.hw_free = snd_intelmad_hw_free,
.prepare = snd_intelmad_pcm_prepare,
.trigger = snd_intelmad_pcm_trigger,
.pointer = snd_intelmad_pcm_pointer,
};
static struct snd_pcm_ops snd_intelmad_haptic_ops = {
.open = snd_intelmad_haptic_open,
.close = snd_intelmad_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_intelmad_hw_params,
.hw_free = snd_intelmad_hw_free,
.prepare = snd_intelmad_pcm_prepare,
.trigger = snd_intelmad_pcm_trigger,
.pointer = snd_intelmad_pcm_pointer,
};
static struct snd_pcm_ops snd_intelmad_capture_ops = {
.open = snd_intelmad_headset_open,
.close = snd_intelmad_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_intelmad_hw_params,
.hw_free = snd_intelmad_hw_free,
.prepare = snd_intelmad_pcm_prepare,
.trigger = snd_intelmad_pcm_trigger,
.pointer = snd_intelmad_pcm_pointer,
};
int intelmad_get_mic_bias(void)
{
struct snd_pmic_ops *pmic_ops;
if (!intelmad_drv || !intelmad_drv->sstdrv_ops)
return -ENODEV;
pmic_ops = intelmad_drv->sstdrv_ops->scard_ops;
if (pmic_ops && pmic_ops->pmic_get_mic_bias)
return pmic_ops->pmic_get_mic_bias(intelmad_drv);
else
return -ENODEV;
}
EXPORT_SYMBOL_GPL(intelmad_get_mic_bias);
int intelmad_set_headset_state(int state)
{
struct snd_pmic_ops *pmic_ops;
if (!intelmad_drv || !intelmad_drv->sstdrv_ops)
return -ENODEV;
pmic_ops = intelmad_drv->sstdrv_ops->scard_ops;
if (pmic_ops && pmic_ops->pmic_set_headset_state)
return pmic_ops->pmic_set_headset_state(state);
else
return -ENODEV;
}
EXPORT_SYMBOL_GPL(intelmad_set_headset_state);
void sst_process_mad_jack_detection(struct work_struct *work)
{
u8 interrupt_status;
struct mad_jack_msg_wq *mad_jack_detect =
container_of(work, struct mad_jack_msg_wq, wq);
struct snd_intelmad *intelmaddata =
mad_jack_detect->intelmaddata;
if (!intelmaddata)
return;
interrupt_status = mad_jack_detect->intsts;
if (intelmaddata->sstdrv_ops && intelmaddata->sstdrv_ops->scard_ops
&& intelmaddata->sstdrv_ops->scard_ops->pmic_irq_cb) {
intelmaddata->sstdrv_ops->scard_ops->pmic_irq_cb(
(void *)intelmaddata, interrupt_status);
intelmaddata->sstdrv_ops->scard_ops->pmic_jack_enable();
}
kfree(mad_jack_detect);
}
/**
* snd_intelmad_intr_handler- interrupt handler
*
* @irq : irq number of the interrupt received
* @dev: device context
*
* This function is called when an interrupt is raised at the sound card
*/
static irqreturn_t snd_intelmad_intr_handler(int irq, void *dev)
{
struct snd_intelmad *intelmaddata =
(struct snd_intelmad *)dev;
u8 interrupt_status;
struct mad_jack_msg_wq *mad_jack_msg;
memcpy_fromio(&interrupt_status,
((void *)(intelmaddata->int_base)),
sizeof(u8));
mad_jack_msg = kzalloc(sizeof(*mad_jack_msg), GFP_ATOMIC);
if (!mad_jack_msg)
	return IRQ_HANDLED; /* allocation failed; drop this jack event */
mad_jack_msg->intsts = interrupt_status;
mad_jack_msg->intelmaddata = intelmaddata;
INIT_WORK(&mad_jack_msg->wq, sst_process_mad_jack_detection);
queue_work(intelmaddata->mad_jack_wq, &mad_jack_msg->wq);
return IRQ_HANDLED;
}
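/*
 * Added note: the handler above is only the top half. It snapshots the
 * interrupt status register and queues a mad_jack_msg_wq item; the actual
 * jack/button decoding happens later in process context in
 * sst_process_mad_jack_detection() via the mad_jack_wq workqueue.
 */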
void sst_mad_send_jack_report(struct snd_jack *jack,
int buttonpressevent , int status)
{
if (!jack) {
pr_debug("MAD error jack empty\n");
} else {
snd_jack_report(jack, status);
/* button pressed and released */
if (buttonpressevent)
snd_jack_report(jack, 0);
pr_debug("MAD sending jack report Done !!!\n");
}
}
static int __devinit snd_intelmad_register_irq(
struct snd_intelmad *intelmaddata, unsigned int regbase,
unsigned int regsize)
{
int ret_val;
char *drv_name;
pr_debug("irq reg regbase 0x%x, regsize 0x%x\n",
regbase, regsize);
intelmaddata->int_base = ioremap_nocache(regbase, regsize);
if (!intelmaddata->int_base)
pr_err("Mapping of cache failed\n");
pr_debug("irq = 0x%x\n", intelmaddata->irq);
if (intelmaddata->cpu_id == CPU_CHIP_PENWELL)
drv_name = DRIVER_NAME_MFLD;
else
drv_name = DRIVER_NAME_MRST;
ret_val = request_irq(intelmaddata->irq,
snd_intelmad_intr_handler,
IRQF_SHARED, drv_name,
intelmaddata);
if (ret_val)
pr_err("cannot register IRQ\n");
return ret_val;
}
static int __devinit snd_intelmad_sst_register(
struct snd_intelmad *intelmaddata)
{
int ret_val = 0;
struct snd_pmic_ops *intelmad_vendor_ops[MAX_VENDORS] = {
&snd_pmic_ops_fs,
&snd_pmic_ops_mx,
&snd_pmic_ops_nc,
&snd_msic_ops
};
struct sc_reg_access vendor_addr = {0x00, 0x00, 0x00};
if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) {
ret_val = sst_sc_reg_access(&vendor_addr, PMIC_READ, 1);
if (ret_val)
return ret_val;
sst_card_vendor_id = (vendor_addr.value & (MASK2|MASK1|MASK0));
pr_debug("original and extracted vendor id = 0x%x %d\n",
vendor_addr.value, sst_card_vendor_id);
if (sst_card_vendor_id < 0 || sst_card_vendor_id > 2) {
pr_err("vendor card not supported!!\n");
return -EIO;
}
} else
sst_card_vendor_id = 0x3;
intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES;
intelmaddata->sstdrv_ops->vendor_id = sst_card_vendor_id;
BUG_ON(!intelmad_vendor_ops[sst_card_vendor_id]);
intelmaddata->sstdrv_ops->scard_ops =
intelmad_vendor_ops[sst_card_vendor_id];
if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
intelmaddata->sstdrv_ops->scard_ops->pb_on = 0;
intelmaddata->sstdrv_ops->scard_ops->cap_on = 0;
intelmaddata->sstdrv_ops->scard_ops->input_dev_id = DMIC;
intelmaddata->sstdrv_ops->scard_ops->output_dev_id =
STEREO_HEADPHONE;
intelmaddata->sstdrv_ops->scard_ops->lineout_dev_id = NONE;
}
/* registering with SST driver to get access to SST APIs to use */
ret_val = register_sst_card(intelmaddata->sstdrv_ops);
if (ret_val) {
pr_err("sst card registration failed\n");
return ret_val;
}
sst_card_vendor_id = intelmaddata->sstdrv_ops->vendor_id;
intelmaddata->pmic_status = PMIC_UNINIT;
return ret_val;
}
static void snd_intelmad_page_free(struct snd_pcm *pcm)
{
snd_pcm_lib_preallocate_free_for_all(pcm);
}
/* Driver Init/exit functionalities */
/**
* snd_intelmad_pcm_new - to setup pcm for the card
*
* @card: pointer to the sound card structure
* @intelmaddata: pointer to internal context
* @pb: playback count for this card
* @cap: capture count for this card
* @index: device index
*
* This function is called from probe function to set up pcm params
* and functions
*/
static int __devinit snd_intelmad_pcm_new(struct snd_card *card,
struct snd_intelmad *intelmaddata,
unsigned int pb, unsigned int cap, unsigned int index)
{
int ret_val = 0;
struct snd_pcm *pcm;
char name[32] = INTEL_MAD;
struct snd_pcm_ops *pb_ops = NULL, *cap_ops = NULL;
pr_debug("called for pb %d, cp %d, idx %d\n", pb, cap, index);
ret_val = snd_pcm_new(card, name, index, pb, cap, &pcm);
if (ret_val)
return ret_val;
/* setup the ops for playback and capture streams */
switch (index) {
case 0:
pb_ops = &snd_intelmad_headset_ops;
cap_ops = &snd_intelmad_capture_ops;
break;
case 1:
pb_ops = &snd_intelmad_ihf_ops;
cap_ops = &snd_intelmad_capture_ops;
break;
case 2:
pb_ops = &snd_intelmad_vibra_ops;
cap_ops = &snd_intelmad_capture_ops;
break;
case 3:
pb_ops = &snd_intelmad_haptic_ops;
cap_ops = &snd_intelmad_capture_ops;
break;
}
if (pb)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, pb_ops);
if (cap)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, cap_ops);
/* setup private data which can be retrieved when required */
pcm->private_data = intelmaddata;
pcm->private_free = snd_intelmad_page_free;
pcm->info_flags = 0;
strncpy(pcm->name, card->shortname, strlen(card->shortname));
/* allocate dma pages for ALSA stream operations */
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
snd_dma_continuous_data(GFP_KERNEL),
MIN_BUFFER, MAX_BUFFER);
return ret_val;
}
static int __devinit snd_intelmad_pcm(struct snd_card *card,
struct snd_intelmad *intelmaddata)
{
int ret_val = 0;
WARN_ON(!card);
WARN_ON(!intelmaddata);
pr_debug("snd_intelmad_pcm called\n");
ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 1, 0);
if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT)
return ret_val;
ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 1);
if (ret_val)
return ret_val;
ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 2);
if (ret_val)
return ret_val;
return snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 3);
}
/**
* snd_intelmad_jack- to setup jack settings of the card
*
* @intelmaddata: pointer to internal context
*
* This function is called to set up the jack and send jack events
*/
static int snd_intelmad_jack(struct snd_intelmad *intelmaddata)
{
struct snd_jack *jack;
int retval;
pr_debug("snd_intelmad_jack called\n");
jack = &intelmaddata->jack[0].jack;
snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_PHONE);
retval = snd_jack_new(intelmaddata->card, "Intel(R) MID Audio Jack",
SND_JACK_HEADPHONE | SND_JACK_HEADSET |
SW_JACK_PHYSICAL_INSERT | SND_JACK_BTN_0
| SND_JACK_BTN_1, &jack);
pr_debug("snd_intelmad_jack called\n");
if (retval < 0)
return retval;
snd_jack_report(jack, 0);
jack->private_data = jack;
intelmaddata->jack[0].jack = *jack;
return retval;
}
/**
* snd_intelmad_mixer- to setup mixer settings of the card
*
* @intelmaddata: pointer to internal context
*
* This function is called from probe function to set up mixer controls
*/
static int __devinit snd_intelmad_mixer(struct snd_intelmad *intelmaddata)
{
struct snd_card *card;
unsigned int idx;
int ret_val = 0, max_controls = 0;
char *mixername = "IntelMAD Controls";
struct snd_kcontrol_new *controls;
WARN_ON(!intelmaddata);
card = intelmaddata->card;
strncpy(card->mixername, mixername, sizeof(card->mixername)-1);
/* add all widget controls and expose the same */
if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
max_controls = MAX_CTRL_MFLD;
controls = snd_intelmad_controls_mfld;
} else {
max_controls = MAX_CTRL_MRST;
controls = snd_intelmad_controls_mrst;
}
for (idx = 0; idx < max_controls; idx++) {
ret_val = snd_ctl_add(card,
snd_ctl_new1(&controls[idx],
intelmaddata));
pr_debug("mixer[idx]=%d added\n", idx);
if (ret_val) {
pr_err("in adding of control index = %d\n", idx);
break;
}
}
return ret_val;
}
static int snd_intelmad_dev_free(struct snd_device *device)
{
struct snd_intelmad *intelmaddata;
WARN_ON(!device);
intelmaddata = device->device_data;
pr_debug("snd_intelmad_dev_free called\n");
unregister_sst_card(intelmaddata->sstdrv_ops);
/* free allocated memory for internal context */
destroy_workqueue(intelmaddata->mad_jack_wq);
device->device_data = NULL;
kfree(intelmaddata->sstdrv_ops);
kfree(intelmaddata);
return 0;
}
static int __devinit snd_intelmad_create(
struct snd_intelmad *intelmaddata,
struct snd_card *card)
{
int ret_val;
static struct snd_device_ops ops = {
.dev_free = snd_intelmad_dev_free,
};
WARN_ON(!intelmaddata);
WARN_ON(!card);
/* ALSA api to register for the device */
ret_val = snd_device_new(card, SNDRV_DEV_LOWLEVEL, intelmaddata, &ops);
return ret_val;
}
/**
* snd_intelmad_probe- function registered for init
* @pdev : pointer to the device structure
* This function is called when the device is initialized
*/
int __devinit snd_intelmad_probe(struct platform_device *pdev)
{
struct snd_card *card;
int ret_val;
struct snd_intelmad *intelmaddata;
const struct platform_device_id *id = platform_get_device_id(pdev);
struct snd_intelmad_probe_info *info = (void *)id->driver_data;
pr_debug("probe for %s cpu_id %d\n", pdev->name, info->cpu_id);
pr_debug("irq_cache %x of size %x\n", info->irq_cache, info->size);
if (!strcmp(pdev->name, DRIVER_NAME_MRST))
pr_debug("detected MRST\n");
else if (!strcmp(pdev->name, DRIVER_NAME_MFLD))
pr_debug("detected MFLD\n");
else {
pr_err("detected unknown device abort!!\n");
return -EIO;
}
if ((info->cpu_id < CPU_CHIP_LINCROFT) ||
(info->cpu_id > CPU_CHIP_PENWELL)) {
pr_err("detected unknown cpu_id abort!!\n");
return -EIO;
}
/* allocate memory for saving internal context and working */
intelmaddata = kzalloc(sizeof(*intelmaddata), GFP_KERNEL);
if (!intelmaddata) {
pr_debug("mem allocation fail\n");
return -ENOMEM;
}
intelmad_drv = intelmaddata;
/* allocate memory for LPE API set */
intelmaddata->sstdrv_ops = kzalloc(sizeof(struct intel_sst_card_ops),
GFP_KERNEL);
if (!intelmaddata->sstdrv_ops) {
pr_err("mem allocation for ops fail\n");
kfree(intelmaddata);
return -ENOMEM;
}
intelmaddata->cpu_id = info->cpu_id;
/* create a card instance with ALSA framework */
ret_val = snd_card_create(card_index, card_id, THIS_MODULE, 0, &card);
if (ret_val) {
pr_err("snd_card_create fail\n");
goto free_allocs;
}
intelmaddata->pdev = pdev;
intelmaddata->irq = platform_get_irq(pdev, 0);
platform_set_drvdata(pdev, intelmaddata);
intelmaddata->card = card;
intelmaddata->card_id = card_id;
intelmaddata->card_index = card_index;
intelmaddata->master_mute = UNMUTE;
intelmaddata->playback_cnt = intelmaddata->capture_cnt = 0;
strncpy(card->driver, INTEL_MAD, strlen(INTEL_MAD));
strncpy(card->shortname, INTEL_MAD, strlen(INTEL_MAD));
intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES;
/* registering with LPE driver to get access to SST APIs to use */
ret_val = snd_intelmad_sst_register(intelmaddata);
if (ret_val) {
pr_err("snd_intelmad_sst_register failed\n");
goto set_null_data;
}
intelmaddata->pmic_status = PMIC_INIT;
ret_val = snd_intelmad_pcm(card, intelmaddata);
if (ret_val) {
pr_err("snd_intelmad_pcm failed\n");
goto free_sst;
}
ret_val = snd_intelmad_mixer(intelmaddata);
if (ret_val) {
pr_err("snd_intelmad_mixer failed\n");
goto free_card;
}
ret_val = snd_intelmad_jack(intelmaddata);
if (ret_val) {
pr_err("snd_intelmad_jack failed\n");
goto free_card;
}
intelmaddata->adc_address = mid_initialize_adc();
/*create work queue for jack interrupt*/
INIT_WORK(&intelmaddata->mad_jack_msg.wq,
sst_process_mad_jack_detection);
intelmaddata->mad_jack_wq = create_workqueue("sst_mad_jack_wq");
if (!intelmaddata->mad_jack_wq) {
	ret_val = -ENOMEM;
	goto free_card;
}
ret_val = snd_intelmad_register_irq(intelmaddata,
info->irq_cache, info->size);
if (ret_val) {
pr_err("snd_intelmad_register_irq fail\n");
goto free_mad_jack_wq;
}
/* internal function call to register device with ALSA */
ret_val = snd_intelmad_create(intelmaddata, card);
if (ret_val) {
pr_err("snd_intelmad_create failed\n");
goto set_pvt_data;
}
card->private_data = &intelmaddata;
snd_card_set_dev(card, &pdev->dev);
ret_val = snd_card_register(card);
if (ret_val) {
pr_err("snd_card_register failed\n");
goto set_pvt_data;
}
if (pdev->dev.platform_data) {
int gpio_amp = *(int *)pdev->dev.platform_data;
if (gpio_request_one(gpio_amp, GPIOF_OUT_INIT_LOW, "amp power"))
gpio_amp = 0;
intelmaddata->sstdrv_ops->scard_ops->gpio_amp = gpio_amp;
}
pr_debug("snd_intelmad_probe complete\n");
return ret_val;
set_pvt_data:
card->private_data = NULL;
free_mad_jack_wq:
destroy_workqueue(intelmaddata->mad_jack_wq);
free_card:
snd_card_free(intelmaddata->card);
free_sst:
unregister_sst_card(intelmaddata->sstdrv_ops);
set_null_data:
platform_set_drvdata(pdev, NULL);
free_allocs:
pr_err("probe failed\n");
snd_card_free(card);
kfree(intelmaddata->sstdrv_ops);
kfree(intelmaddata);
return ret_val;
}
static int snd_intelmad_remove(struct platform_device *pdev)
{
struct snd_intelmad *intelmaddata = platform_get_drvdata(pdev);
if (intelmaddata) {
if (intelmaddata->sstdrv_ops->scard_ops->gpio_amp)
gpio_free(intelmaddata->sstdrv_ops->scard_ops->gpio_amp);
free_irq(intelmaddata->irq, intelmaddata);
snd_card_free(intelmaddata->card);
}
intelmad_drv = NULL;
platform_set_drvdata(pdev, NULL);
return 0;
}
/*********************************************************************
* Driver initialization and exit
*********************************************************************/
static const struct platform_device_id snd_intelmad_ids[] = {
{DRIVER_NAME_MRST, INFO(CPU_CHIP_LINCROFT, AUDINT_BASE, 1)},
{DRIVER_NAME_MFLD, INFO(CPU_CHIP_PENWELL, 0xFFFF7FCD, 1)},
{"", 0},
};
static struct platform_driver snd_intelmad_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "intel_mid_sound_card",
},
.id_table = snd_intelmad_ids,
.probe = snd_intelmad_probe,
.remove = __devexit_p(snd_intelmad_remove),
};
/*
* alsa_card_intelmad_init- driver init function
*
* This function is called when driver module is inserted
*/
static int __init alsa_card_intelmad_init(void)
{
pr_debug("mad_init called\n");
return platform_driver_register(&snd_intelmad_driver);
}
/**
* alsa_card_intelmad_exit- driver exit function
*
* This function is called when driver module is removed
*/
static void __exit alsa_card_intelmad_exit(void)
{
pr_debug("mad_exit called\n");
return platform_driver_unregister(&snd_intelmad_driver);
}
module_init(alsa_card_intelmad_init)
module_exit(alsa_card_intelmad_exit)
| gpl-2.0 |
EnJens/android_kernel_asus_grouper | drivers/edac/i5400_edac.c | 3028 | 39969 | /*
* Intel 5400 class Memory Controllers kernel module (Seaburg)
*
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Copyright (c) 2008 by:
* Ben Woodard <woodard@redhat.com>
* Mauro Carvalho Chehab <mchehab@redhat.com>
*
* Red Hat Inc. http://www.redhat.com
*
* Forked and adapted from the i5000_edac driver which was
* written by Douglas Thompson Linux Networx <norsk5@xmission.com>
*
* This module is based on the following document:
*
* Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
* http://developer.intel.com/design/chipsets/datashts/313070.htm
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include "edac_core.h"
/*
* Alter this version for the I5400 module when modifications are made
*/
#define I5400_REVISION " Ver: 1.0.0"
#define EDAC_MOD_STR "i5400_edac"
#define i5400_printk(level, fmt, arg...) \
edac_printk(level, "i5400", fmt, ##arg)
#define i5400_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
/* Limits for i5400 */
#define NUM_MTRS_PER_BRANCH 4
#define CHANNELS_PER_BRANCH 2
#define MAX_DIMMS_PER_CHANNEL NUM_MTRS_PER_BRANCH
#define MAX_CHANNELS 4
/* max possible csrows per channel */
#define MAX_CSROWS (MAX_DIMMS_PER_CHANNEL)
/* Device 16,
* Function 0: System Address
* Function 1: Memory Branch Map, Control, Errors Register
* Function 2: FSB Error Registers
*
* All 3 functions of Device 16 (0,1,2) share the SAME DID and
* uses PCI_DEVICE_ID_INTEL_5400_ERR for device 16 (0,1,2),
* PCI_DEVICE_ID_INTEL_5400_FBD0 and PCI_DEVICE_ID_INTEL_5400_FBD1
* for device 21 (0,1).
*/
/* OFFSETS for Function 0 */
#define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */
#define MAXCH 0x56 /* Max Channel Number */
#define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */
/* OFFSETS for Function 1 */
#define TOLM 0x6C
#define REDMEMB 0x7C
#define REC_ECC_LOCATOR_ODD(x) ((x) & 0x3fe00) /* bits [17:9] indicate ODD, [8:0] indicate EVEN */
#define MIR0 0x80
#define MIR1 0x84
#define AMIR0 0x8c
#define AMIR1 0x90
/* Fatal error registers */
#define FERR_FAT_FBD 0x98 /* also called as FERR_FAT_FB_DIMM at datasheet */
#define FERR_FAT_FBDCHAN (3<<28) /* channel index where the highest-order error occurred */
#define NERR_FAT_FBD 0x9c
#define FERR_NF_FBD 0xa0 /* also called as FERR_NFAT_FB_DIMM at datasheet */
/* Non-fatal error register */
#define NERR_NF_FBD 0xa4
/* Enable error mask */
#define EMASK_FBD 0xa8
#define ERR0_FBD 0xac
#define ERR1_FBD 0xb0
#define ERR2_FBD 0xb4
#define MCERR_FBD 0xb8
/* No OFFSETS for Device 16 Function 2 */
/*
* Device 21,
* Function 0: Memory Map Branch 0
*
* Device 22,
* Function 0: Memory Map Branch 1
*/
/* OFFSETS for Function 0 */
#define AMBPRESENT_0 0x64
#define AMBPRESENT_1 0x66
#define MTR0 0x80
#define MTR1 0x82
#define MTR2 0x84
#define MTR3 0x86
/* OFFSETS for Function 1 */
#define NRECFGLOG 0x74
#define RECFGLOG 0x78
#define NRECMEMA 0xbe
#define NRECMEMB 0xc0
#define NRECFB_DIMMA 0xc4
#define NRECFB_DIMMB 0xc8
#define NRECFB_DIMMC 0xcc
#define NRECFB_DIMMD 0xd0
#define NRECFB_DIMME 0xd4
#define NRECFB_DIMMF 0xd8
#define REDMEMA 0xdC
#define RECMEMA 0xf0
#define RECMEMB 0xf4
#define RECFB_DIMMA 0xf8
#define RECFB_DIMMB 0xec
#define RECFB_DIMMC 0xf0
#define RECFB_DIMMD 0xf4
#define RECFB_DIMME 0xf8
#define RECFB_DIMMF 0xfC
/*
* Error indicator bits and masks
* Error masks are according with Table 5-17 of i5400 datasheet
*/
enum error_mask {
EMASK_M1 = 1<<0, /* Memory Write error on non-redundant retry */
EMASK_M2 = 1<<1, /* Memory or FB-DIMM configuration CRC read error */
EMASK_M3 = 1<<2, /* Reserved */
EMASK_M4 = 1<<3, /* Uncorrectable Data ECC on Replay */
EMASK_M5 = 1<<4, /* Aliased Uncorrectable Non-Mirrored Demand Data ECC */
EMASK_M6 = 1<<5, /* Unsupported on i5400 */
EMASK_M7 = 1<<6, /* Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */
EMASK_M8 = 1<<7, /* Aliased Uncorrectable Patrol Data ECC */
EMASK_M9 = 1<<8, /* Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC */
EMASK_M10 = 1<<9, /* Unsupported on i5400 */
EMASK_M11 = 1<<10, /* Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC */
EMASK_M12 = 1<<11, /* Non-Aliased Uncorrectable Patrol Data ECC */
EMASK_M13 = 1<<12, /* Memory Write error on first attempt */
EMASK_M14 = 1<<13, /* FB-DIMM Configuration Write error on first attempt */
EMASK_M15 = 1<<14, /* Memory or FB-DIMM configuration CRC read error */
EMASK_M16 = 1<<15, /* Channel Failed-Over Occurred */
EMASK_M17 = 1<<16, /* Correctable Non-Mirrored Demand Data ECC */
EMASK_M18 = 1<<17, /* Unsupported on i5400 */
EMASK_M19 = 1<<18, /* Correctable Resilver- or Spare-Copy Data ECC */
EMASK_M20 = 1<<19, /* Correctable Patrol Data ECC */
EMASK_M21 = 1<<20, /* FB-DIMM Northbound parity error on FB-DIMM Sync Status */
EMASK_M22 = 1<<21, /* SPD protocol Error */
EMASK_M23 = 1<<22, /* Non-Redundant Fast Reset Timeout */
EMASK_M24 = 1<<23, /* Refresh error */
EMASK_M25 = 1<<24, /* Memory Write error on redundant retry */
EMASK_M26 = 1<<25, /* Redundant Fast Reset Timeout */
EMASK_M27 = 1<<26, /* Correctable Counter Threshold Exceeded */
EMASK_M28 = 1<<27, /* DIMM-Spare Copy Completed */
EMASK_M29 = 1<<28, /* DIMM-Isolation Completed */
};
/*
* Names to translate bit error into something useful
*/
static const char *error_name[] = {
[0] = "Memory Write error on non-redundant retry",
[1] = "Memory or FB-DIMM configuration CRC read error",
/* Reserved */
[3] = "Uncorrectable Data ECC on Replay",
[4] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
/* M6 Unsupported on i5400 */
[6] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
[7] = "Aliased Uncorrectable Patrol Data ECC",
[8] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
/* M10 Unsupported on i5400 */
[10] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
[11] = "Non-Aliased Uncorrectable Patrol Data ECC",
[12] = "Memory Write error on first attempt",
[13] = "FB-DIMM Configuration Write error on first attempt",
[14] = "Memory or FB-DIMM configuration CRC read error",
[15] = "Channel Failed-Over Occurred",
[16] = "Correctable Non-Mirrored Demand Data ECC",
/* M18 Unsupported on i5400 */
[18] = "Correctable Resilver- or Spare-Copy Data ECC",
[19] = "Correctable Patrol Data ECC",
[20] = "FB-DIMM Northbound parity error on FB-DIMM Sync Status",
[21] = "SPD protocol Error",
[22] = "Non-Redundant Fast Reset Timeout",
[23] = "Refresh error",
[24] = "Memory Write error on redundant retry",
[25] = "Redundant Fast Reset Timeout",
[26] = "Correctable Counter Threshold Exceeded",
[27] = "DIMM-Spare Copy Completed",
[28] = "DIMM-Isolation Completed",
};
/* Fatal errors */
#define ERROR_FAT_MASK (EMASK_M1 | \
EMASK_M2 | \
EMASK_M23)
/* Correctable errors */
#define ERROR_NF_CORRECTABLE (EMASK_M27 | \
EMASK_M20 | \
EMASK_M19 | \
EMASK_M18 | \
EMASK_M17 | \
EMASK_M16)
#define ERROR_NF_DIMM_SPARE (EMASK_M29 | \
EMASK_M28)
#define ERROR_NF_SPD_PROTOCOL (EMASK_M22)
#define ERROR_NF_NORTH_CRC (EMASK_M21)
/* Recoverable errors */
#define ERROR_NF_RECOVERABLE (EMASK_M26 | \
EMASK_M25 | \
EMASK_M24 | \
EMASK_M15 | \
EMASK_M14 | \
EMASK_M13 | \
EMASK_M12 | \
EMASK_M11 | \
EMASK_M9 | \
EMASK_M8 | \
EMASK_M7 | \
EMASK_M5)
/* uncorrectable errors */
#define ERROR_NF_UNCORRECTABLE (EMASK_M4)
/* mask to all non-fatal errors */
#define ERROR_NF_MASK (ERROR_NF_CORRECTABLE | \
ERROR_NF_UNCORRECTABLE | \
ERROR_NF_RECOVERABLE | \
ERROR_NF_DIMM_SPARE | \
ERROR_NF_SPD_PROTOCOL | \
ERROR_NF_NORTH_CRC)
/*
* Define error masks for the several registers
*/
/* Enable all fatal and non fatal errors */
#define ENABLE_EMASK_ALL (ERROR_FAT_MASK | ERROR_NF_MASK)
/* mask for fatal error registers */
#define FERR_FAT_MASK ERROR_FAT_MASK
/* masks for non-fatal error register */
static inline int to_nf_mask(unsigned int mask)
{
return (mask & EMASK_M29) | (mask >> 3);
};
static inline int from_nf_ferr(unsigned int mask)
{
return (mask & EMASK_M29) | /* Bit 28 */
(mask & ((1 << 28) - 1) << 3); /* Bits 0 to 27 */
};
#define FERR_NF_MASK to_nf_mask(ERROR_NF_MASK)
#define FERR_NF_CORRECTABLE to_nf_mask(ERROR_NF_CORRECTABLE)
#define FERR_NF_DIMM_SPARE to_nf_mask(ERROR_NF_DIMM_SPARE)
#define FERR_NF_SPD_PROTOCOL to_nf_mask(ERROR_NF_SPD_PROTOCOL)
#define FERR_NF_NORTH_CRC to_nf_mask(ERROR_NF_NORTH_CRC)
#define FERR_NF_RECOVERABLE to_nf_mask(ERROR_NF_RECOVERABLE)
#define FERR_NF_UNCORRECTABLE to_nf_mask(ERROR_NF_UNCORRECTABLE)
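/*
* Layout note (inferred from the helpers above, not from the datasheet):
* EMASK_FBD keeps the three fatal-only errors M1..M3 in bits 0..2, so
* masks built from EMASK_* bits are shifted down by three positions for
* the non-fatal FERR/NERR registers, with M29 kept at bit 28. For
* example, to_nf_mask(EMASK_M4) == (1 << 0) and
* to_nf_mask(EMASK_M22) == (1 << 18). from_nf_ferr() is intended to map
* a FERR_NF_FBD value back toward EMASK_* bit positions for decoding.
*/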
/* Defines to extract the various fields from the
* MTRx - Memory Technology Registers
*/
#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 10))
#define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 9))
#define MTR_DRAM_WIDTH(mtr) (((mtr) & (1 << 8)) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr) (((mtr) & (1 << 6)) ? 8 : 4)
#define MTR_DRAM_BANKS_ADDR_BITS(mtr) ((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
#define MTR_DIMM_RANK(mtr) (((mtr) >> 5) & 0x1)
#define MTR_DIMM_RANK_ADDR_BITS(mtr) (MTR_DIMM_RANK(mtr) ? 2 : 1)
#define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3)
#define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
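/*
* Illustrative decode (a made-up value, not read from hardware):
* mtr = 0x545 -> DIMMs present, electrical throttling disabled,
* x8 DRAM width, 8 banks (3 bank address bits), single rank,
* NUMROW field = 1 (14 row address bits) and NUMCOL field = 1
* (11 column address bits).
*/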
/* This applies to FERR_NF_FB-DIMM as well as FERR_FAT_FB-DIMM */
static inline int extract_fbdchan_indx(u32 x)
{
return (x>>28) & 0x3;
}
#ifdef CONFIG_EDAC_DEBUG
/* MTR NUMROW */
static const char *numrow_toString[] = {
"8,192 - 13 rows",
"16,384 - 14 rows",
"32,768 - 15 rows",
"65,536 - 16 rows"
};
/* MTR NUMCOL */
static const char *numcol_toString[] = {
"1,024 - 10 columns",
"2,048 - 11 columns",
"4,096 - 12 columns",
"reserved"
};
#endif
/* Device name and register DID (Device ID) */
struct i5400_dev_info {
const char *ctl_name; /* name for this device */
u16 fsb_mapping_errors; /* DID for the branchmap,control */
};
/* Table of devices attributes supported by this driver */
static const struct i5400_dev_info i5400_devs[] = {
{
.ctl_name = "I5400",
.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_5400_ERR,
},
};
struct i5400_dimm_info {
int megabytes; /* size, 0 means not present */
};
/* driver private data structure */
struct i5400_pvt {
struct pci_dev *system_address; /* 16.0 */
struct pci_dev *branchmap_werrors; /* 16.1 */
struct pci_dev *fsb_error_regs; /* 16.2 */
struct pci_dev *branch_0; /* 21.0 */
struct pci_dev *branch_1; /* 22.0 */
u16 tolm; /* top of low memory */
u64 ambase; /* AMB BAR */
u16 mir0, mir1;
u16 b0_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technology Reg */
u16 b0_ambpresent0; /* Branch 0, Channel 0 */
u16 b0_ambpresent1; /* Branch 0, Channel 1 */
u16 b1_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technology Reg */
u16 b1_ambpresent0; /* Branch 1, Channel 0 */
u16 b1_ambpresent1; /* Branch 1, Channel 1 */
/* DIMM information matrix, allocating architecture maximums */
struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
/* Actual values for this controller */
int maxch; /* Max channels */
int maxdimmperch; /* Max DIMMs per channel */
};
/* I5400 MCH error information retrieved from Hardware */
struct i5400_error_info {
/* These registers are always read from the MC */
u32 ferr_fat_fbd; /* First Errors Fatal */
u32 nerr_fat_fbd; /* Next Errors Fatal */
u32 ferr_nf_fbd; /* First Errors Non-Fatal */
u32 nerr_nf_fbd; /* Next Errors Non-Fatal */
/* These registers are input ONLY if there was a Recoverable Error */
u32 redmemb; /* Recoverable Mem Data Error log B */
u16 recmema; /* Recoverable Mem Error log A */
u32 recmemb; /* Recoverable Mem Error log B */
/* These registers are input ONLY if there was a Non-Rec Error */
u16 nrecmema; /* Non-Recoverable Mem log A */
u16 nrecmemb; /* Non-Recoverable Mem log B */
};
/* Note that nrec_rdwr moved from NRECMEMA to NRECMEMB between the 5000 and
the 5400, so an inline function is safer than a macro in this case. */
static inline int nrec_bank(struct i5400_error_info *info)
{
return ((info->nrecmema) >> 12) & 0x7;
}
static inline int nrec_rank(struct i5400_error_info *info)
{
return ((info->nrecmema) >> 8) & 0xf;
}
static inline int nrec_buf_id(struct i5400_error_info *info)
{
return ((info->nrecmema)) & 0xff;
}
static inline int nrec_rdwr(struct i5400_error_info *info)
{
return (info->nrecmemb) >> 31;
}
/* This applies to both the NREC and REC strings, so it can be used with
nrec_rdwr and rec_rdwr */
static inline const char *rdwr_str(int rdwr)
{
return rdwr ? "Write" : "Read";
}
static inline int nrec_cas(struct i5400_error_info *info)
{
return ((info->nrecmemb) >> 16) & 0x1fff;
}
static inline int nrec_ras(struct i5400_error_info *info)
{
return (info->nrecmemb) & 0xffff;
}
static inline int rec_bank(struct i5400_error_info *info)
{
return ((info->recmema) >> 12) & 0x7;
}
static inline int rec_rank(struct i5400_error_info *info)
{
return ((info->recmema) >> 8) & 0xf;
}
static inline int rec_rdwr(struct i5400_error_info *info)
{
return (info->recmemb) >> 31;
}
static inline int rec_cas(struct i5400_error_info *info)
{
return ((info->recmemb) >> 16) & 0x1fff;
}
static inline int rec_ras(struct i5400_error_info *info)
{
return (info->recmemb) & 0xffff;
}
static struct edac_pci_ctl_info *i5400_pci;
/*
* i5400_get_error_info Retrieve the hardware error information from
* the hardware and cache it in the 'info'
* structure
*/
static void i5400_get_error_info(struct mem_ctl_info *mci,
struct i5400_error_info *info)
{
struct i5400_pvt *pvt;
u32 value;
pvt = mci->pvt_info;
/* read in the 1st FATAL error register */
pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);
/* Mask only the bits that the doc says are valid
*/
value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);
/* If there is an error, then read in the
NEXT FATAL error register and the Memory Error Log Register A
*/
if (value & FERR_FAT_MASK) {
info->ferr_fat_fbd = value;
/* harvest the various error data we need */
pci_read_config_dword(pvt->branchmap_werrors,
NERR_FAT_FBD, &info->nerr_fat_fbd);
pci_read_config_word(pvt->branchmap_werrors,
NRECMEMA, &info->nrecmema);
pci_read_config_word(pvt->branchmap_werrors,
NRECMEMB, &info->nrecmemb);
/* Clear the error bits, by writing them back */
pci_write_config_dword(pvt->branchmap_werrors,
FERR_FAT_FBD, value);
} else {
info->ferr_fat_fbd = 0;
info->nerr_fat_fbd = 0;
info->nrecmema = 0;
info->nrecmemb = 0;
}
/* read in the 1st NON-FATAL error register */
pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);
/* If there is an error, then read in the NEXT NON-FATAL error
* register and the recoverable-error log registers as well */
if (value & FERR_NF_MASK) {
info->ferr_nf_fbd = value;
/* harvest the various error data we need */
pci_read_config_dword(pvt->branchmap_werrors,
NERR_NF_FBD, &info->nerr_nf_fbd);
pci_read_config_word(pvt->branchmap_werrors,
RECMEMA, &info->recmema);
pci_read_config_dword(pvt->branchmap_werrors,
RECMEMB, &info->recmemb);
pci_read_config_dword(pvt->branchmap_werrors,
REDMEMB, &info->redmemb);
/* Clear the error bits, by writing them back */
pci_write_config_dword(pvt->branchmap_werrors,
FERR_NF_FBD, value);
} else {
info->ferr_nf_fbd = 0;
info->nerr_nf_fbd = 0;
info->recmema = 0;
info->recmemb = 0;
info->redmemb = 0;
}
}
/*
* i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
* struct i5400_error_info *info,
* int handle_errors);
*
* handle the Intel FATAL and unrecoverable errors, if any
*/
static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
struct i5400_error_info *info,
unsigned long allErrors)
{
char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
int branch;
int channel;
int bank;
int buf_id;
int rank;
int rdwr;
int ras, cas;
int errnum;
char *type = NULL;
if (!allErrors)
return; /* if no error, return now */
if (allErrors & ERROR_FAT_MASK)
type = "FATAL";
else if (allErrors & FERR_NF_UNCORRECTABLE)
type = "NON-FATAL uncorrected";
else
type = "NON-FATAL recoverable";
/* ONLY ONE of the possible error bits will be set, as per the docs */
branch = extract_fbdchan_indx(info->ferr_fat_fbd);
channel = branch;
/* Use the NON-Recoverable macros to extract data */
bank = nrec_bank(info);
rank = nrec_rank(info);
buf_id = nrec_buf_id(info);
rdwr = nrec_rdwr(info);
ras = nrec_ras(info);
cas = nrec_cas(info);
debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
"DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, channel + 1, branch >> 1, bank,
buf_id, rdwr_str(rdwr), ras, cas);
/* Only 1 bit will be on */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
/* Form out message */
snprintf(msg, sizeof(msg),
"%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s "
"RAS=%d CAS=%d %s Err=0x%lx (%s))",
type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas,
type, allErrors, error_name[errnum]);
/* Call the helper to output message */
edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
}
/*
* i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
* struct i5400_error_info *info,
* int handle_errors);
*
* handle the Intel NON-FATAL errors, if any
*/
static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
struct i5400_error_info *info)
{
char msg[EDAC_MC_LABEL_LEN + 1 + 90 + 80];
unsigned long allErrors;
int branch;
int channel;
int bank;
int rank;
int rdwr;
int ras, cas;
int errnum;
/* mask off the Error bits that are possible */
allErrors = from_nf_ferr(info->ferr_nf_fbd & FERR_NF_MASK);
if (!allErrors)
return; /* if no error, return now */
/* ONLY ONE of the possible error bits will be set, as per the docs */
if (allErrors & (ERROR_NF_UNCORRECTABLE | ERROR_NF_RECOVERABLE)) {
i5400_proccess_non_recoverable_info(mci, info, allErrors);
return;
}
/* Correctable errors */
if (allErrors & ERROR_NF_CORRECTABLE) {
debugf0("\tCorrected bits= 0x%lx\n", allErrors);
branch = extract_fbdchan_indx(info->ferr_nf_fbd);
channel = 0;
if (REC_ECC_LOCATOR_ODD(info->redmemb))
channel = 1;
/* Convert channel to be based from zero, instead of
* from branch base of 0 */
channel += branch;
bank = rec_bank(info);
rank = rec_rank(info);
rdwr = rec_rdwr(info);
ras = rec_ras(info);
cas = rec_cas(info);
/* Only 1 bit will be on */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
debugf0("\t\tCSROW= %d Channel= %d (Branch %d "
"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, branch >> 1, bank,
rdwr_str(rdwr), ras, cas);
/* Form out message */
snprintf(msg, sizeof(msg),
"Corrected error (Branch=%d DRAM-Bank=%d RDWR=%s "
"RAS=%d CAS=%d, CE Err=0x%lx (%s))",
branch >> 1, bank, rdwr_str(rdwr), ras, cas,
allErrors, error_name[errnum]);
/* Call the helper to output message */
edac_mc_handle_fbd_ce(mci, rank, channel, msg);
return;
}
/* Miscellaneous errors */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
branch = extract_fbdchan_indx(info->ferr_nf_fbd);
i5400_mc_printk(mci, KERN_EMERG,
"Non-Fatal misc error (Branch=%d Err=%#lx (%s))",
branch >> 1, allErrors, error_name[errnum]);
}
/*
* i5400_process_error_info Process the error info that is
* in the 'info' structure, previously retrieved from hardware
*/
static void i5400_process_error_info(struct mem_ctl_info *mci,
struct i5400_error_info *info)
{
u32 allErrors;
/* First handle any fatal errors that occurred */
allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
i5400_proccess_non_recoverable_info(mci, info, allErrors);
/* now handle any non-fatal errors that occurred */
i5400_process_nonfatal_error_info(mci, info);
}
/*
* i5400_clear_error Retrieve any error from the hardware
* but do NOT process that error.
* Used for 'clearing' out of previous errors
* Called by the Core module.
*/
static void i5400_clear_error(struct mem_ctl_info *mci)
{
struct i5400_error_info info;
i5400_get_error_info(mci, &info);
}
/*
* i5400_check_error Retrieve and process errors reported by the
* hardware. Called by the Core module.
*/
static void i5400_check_error(struct mem_ctl_info *mci)
{
struct i5400_error_info info;
debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
i5400_get_error_info(mci, &info);
i5400_process_error_info(mci, &info);
}
/*
* i5400_put_devices 'put' all the devices that we have
* reserved via 'get'
*/
static void i5400_put_devices(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
pvt = mci->pvt_info;
/* Decrement usage count for devices */
pci_dev_put(pvt->branch_1);
pci_dev_put(pvt->branch_0);
pci_dev_put(pvt->fsb_error_regs);
pci_dev_put(pvt->branchmap_werrors);
}
/*
* i5400_get_devices Find and perform 'get' operation on the MCH's
* device/functions we want to reference for this driver
*
* Need to 'get' device 16 func 1 and func 2
*/
static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
{
struct i5400_pvt *pvt;
struct pci_dev *pdev;
pvt = mci->pvt_info;
pvt->branchmap_werrors = NULL;
pvt->fsb_error_regs = NULL;
pvt->branch_0 = NULL;
pvt->branch_1 = NULL;
/* Attempt to 'get' the MCH register we want */
pdev = NULL;
while (!pvt->branchmap_werrors || !pvt->fsb_error_regs) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_ERR, pdev);
if (!pdev) {
/* End of list, leave */
i5400_printk(KERN_ERR,
"'system address,Process Bus' "
"device not found:"
"vendor 0x%x device 0x%x ERR funcs "
"(broken BIOS?)\n",
PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_ERR);
goto error;
}
/* Store device 16 funcs 1 and 2 */
switch (PCI_FUNC(pdev->devfn)) {
case 1:
pvt->branchmap_werrors = pdev;
break;
case 2:
pvt->fsb_error_regs = pdev;
break;
}
}
debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
pci_name(pvt->system_address),
pvt->system_address->vendor, pvt->system_address->device);
debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
pci_name(pvt->branchmap_werrors),
pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
pci_name(pvt->fsb_error_regs),
pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_FBD0, NULL);
if (!pvt->branch_0) {
i5400_printk(KERN_ERR,
"MC: 'BRANCH 0' device not found:"
"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0);
goto error;
}
/* If this device claims to have more than 2 channels then
* fetch Branch 1's information
*/
if (pvt->maxch < CHANNELS_PER_BRANCH)
return 0;
pvt->branch_1 = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_FBD1, NULL);
if (!pvt->branch_1) {
i5400_printk(KERN_ERR,
"MC: 'BRANCH 1' device not found:"
"vendor 0x%x device 0x%x Func 0 "
"(broken BIOS?)\n",
PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_FBD1);
goto error;
}
return 0;
error:
i5400_put_devices(mci);
return -ENODEV;
}
/*
* determine_amb_present_reg
*
* the AMB-present information is spread across one register per channel,
* so selecting the correct register requires knowing which channel is
* in question
*
* 2 branches, each with 2 channels
* b0_ambpresent0 for channel '0'
* b0_ambpresent1 for channel '1'
* b1_ambpresent0 for channel '2'
* b1_ambpresent1 for channel '3'
*/
static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
{
int amb_present;
if (channel < CHANNELS_PER_BRANCH) {
if (channel & 0x1)
amb_present = pvt->b0_ambpresent1;
else
amb_present = pvt->b0_ambpresent0;
} else {
if (channel & 0x1)
amb_present = pvt->b1_ambpresent1;
else
amb_present = pvt->b1_ambpresent0;
}
return amb_present;
}
/*
* determine_mtr(pvt, csrow, channel)
*
* return the proper MTR register as determined by the csrow and desired channel
*/
static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
{
int mtr;
int n;
/* There is one MTR for each slot pair of FB-DIMMs;
each slot pair may be at branch 0 or branch 1.
*/
n = csrow;
if (n >= NUM_MTRS_PER_BRANCH) {
debugf0("ERROR: trying to access an invalid csrow: %d\n",
csrow);
return 0;
}
if (channel < CHANNELS_PER_BRANCH)
mtr = pvt->b0_mtr[n];
else
mtr = pvt->b1_mtr[n];
return mtr;
}
/*
* decode_mtr Decode one MTR register and dump its fields via debugf2
*/
static void decode_mtr(int slot_row, u16 mtr)
{
int ans;
ans = MTR_DIMMS_PRESENT(mtr);
debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr,
ans ? "Present" : "NOT Present");
if (!ans)
return;
debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
debugf2("\t\tELECTRICAL THROTTLING is %s\n",
MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
}
static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
struct i5400_dimm_info *dinfo)
{
int mtr;
int amb_present_reg;
int addrBits;
mtr = determine_mtr(pvt, csrow, channel);
if (MTR_DIMMS_PRESENT(mtr)) {
amb_present_reg = determine_amb_present_reg(pvt, channel);
/* Determine if there is a DIMM present in this DIMM slot */
if (amb_present_reg & (1 << csrow)) {
/* Start with the number of bits for a Bank
* on the DRAM */
addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
/* Add the number of ROW bits */
addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
/* add the number of COLUMN bits */
addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
/* add the number of RANK bits */
addrBits += MTR_DIMM_RANK(mtr);
addrBits += 6; /* add 64 bits per DIMM */
addrBits -= 20; /* divide by 2^20 */
addrBits -= 3; /* 8 bits per byte */
dinfo->megabytes = 1 << addrBits;
}
}
}
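/*
* Size arithmetic example (using the illustrative MTR value 0x545 noted
* above, assuming the AMB-present bit for the slot is set): 3 bank bits
* + 14 row bits + 11 column bits + 0 rank bits + 6 - 20 - 3 = 11, so
* dinfo->megabytes = 1 << 11 = 2048 MB for that DIMM slot.
*/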
/*
* calculate_dimm_size
*
* also will output a DIMM matrix map, if debug is enabled, for viewing
* how the DIMMs are populated
*/
static void calculate_dimm_size(struct i5400_pvt *pvt)
{
struct i5400_dimm_info *dinfo;
int csrow, max_csrows;
char *p, *mem_buffer;
int space, n;
int channel;
/* ================= Generate some debug output ================= */
space = PAGE_SIZE;
mem_buffer = p = kmalloc(space, GFP_KERNEL);
if (p == NULL) {
i5400_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
__FILE__, __func__);
return;
}
/* Scan all the actual CSROWS
* and calculate the information for each DIMM
* Start with the highest csrow first, to display it first
* and work toward the 0th csrow
*/
max_csrows = pvt->maxdimmperch;
for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
/* on an odd csrow, first output a 'boundary' marker,
* then reset the message buffer */
if (csrow & 0x1) {
n = snprintf(p, space, "---------------------------"
"--------------------------------");
p += n;
space -= n;
debugf2("%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
n = snprintf(p, space, "csrow %2d ", csrow);
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
dinfo = &pvt->dimm_info[csrow][channel];
handle_channel(pvt, csrow, channel, dinfo);
n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
p += n;
space -= n;
}
debugf2("%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
/* Output the last bottom 'boundary' marker */
n = snprintf(p, space, "---------------------------"
"--------------------------------");
p += n;
space -= n;
debugf2("%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
/* now output the 'channel' labels */
n = snprintf(p, space, " ");
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
n = snprintf(p, space, "channel %d | ", channel);
p += n;
space -= n;
}
/* output the last message and free buffer */
debugf2("%s\n", mem_buffer);
kfree(mem_buffer);
}
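/*
* With debug enabled the matrix above prints roughly like this
* (layout and sizes are illustrative only):
*
* ---------------------------------------------------------
* csrow 3    0 MB |    0 MB |
* csrow 2    0 MB |    0 MB |
* ---------------------------------------------------------
* csrow 1 2048 MB | 2048 MB |
* csrow 0 2048 MB | 2048 MB |
* ---------------------------------------------------------
*          channel 0 | channel 1 |
*/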
/*
* i5400_get_mc_regs read in the necessary registers and
* cache locally
*
* Fills in the private data members
*/
static void i5400_get_mc_regs(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
u32 actual_tolm;
u16 limit;
int slot_row;
int maxch;
int maxdimmperch;
int way0, way1;
pvt = mci->pvt_info;
pci_read_config_dword(pvt->system_address, AMBASE,
(u32 *) &pvt->ambase);
pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
((u32 *) &pvt->ambase) + sizeof(u32));
maxdimmperch = pvt->maxdimmperch;
maxch = pvt->maxch;
debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
(long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
/* Get the Branch Map regs */
pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
pvt->tolm >>= 12;
debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
pvt->tolm);
actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
/* Get the MIR[0-1] regs */
limit = (pvt->mir0 >> 4) & 0x0fff;
way0 = pvt->mir0 & 0x1;
way1 = pvt->mir0 & 0x2;
debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
limit = (pvt->mir1 >> 4) & 0xfff;
way0 = pvt->mir1 & 0x1;
way1 = pvt->mir1 & 0x2;
debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
/* Get the set of MTR[0-3] regs by each branch */
for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) {
int where = MTR0 + (slot_row * sizeof(u16));
/* Branch 0 set of MTR registers */
pci_read_config_word(pvt->branch_0, where,
&pvt->b0_mtr[slot_row]);
debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
pvt->b0_mtr[slot_row]);
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_mtr[slot_row] = 0;
continue;
}
/* Branch 1 set of MTR registers */
pci_read_config_word(pvt->branch_1, where,
&pvt->b1_mtr[slot_row]);
debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where,
pvt->b1_mtr[slot_row]);
}
/* Read and dump branch 0's MTRs */
debugf2("\nMemory Technology Registers:\n");
debugf2(" Branch 0:\n");
for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
&pvt->b0_ambpresent0);
debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
pci_read_config_word(pvt->branch_0, AMBPRESENT_1,
&pvt->b0_ambpresent1);
debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
/* Only if we have 2 branches (4 channels) */
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_ambpresent0 = 0;
pvt->b1_ambpresent1 = 0;
} else {
/* Read and dump branch 1's MTRs */
debugf2(" Branch 1:\n");
for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
&pvt->b1_ambpresent0);
debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
pvt->b1_ambpresent0);
pci_read_config_word(pvt->branch_1, AMBPRESENT_1,
&pvt->b1_ambpresent1);
debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
pvt->b1_ambpresent1);
}
/* Go and determine the size of each DIMM and place in an
* orderly matrix */
calculate_dimm_size(pvt);
}
/*
* i5400_init_csrows Initialize the 'csrows' table within
* the mci control structure with the
* addressing of memory.
*
* return:
* 0 success
* 1 no actual memory found on this MC
*/
static int i5400_init_csrows(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
struct csrow_info *p_csrow;
int empty, channel_count;
int max_csrows;
int mtr;
int csrow_megs;
int channel;
int csrow;
pvt = mci->pvt_info;
channel_count = pvt->maxch;
max_csrows = pvt->maxdimmperch;
empty = 1; /* Assume NO memory */
for (csrow = 0; csrow < max_csrows; csrow++) {
p_csrow = &mci->csrows[csrow];
p_csrow->csrow_idx = csrow;
/* use branch 0 for the basis */
mtr = determine_mtr(pvt, csrow, 0);
/* if no DIMMS on this row, continue */
if (!MTR_DIMMS_PRESENT(mtr))
continue;
/* FAKE OUT VALUES, FIXME */
p_csrow->first_page = 0 + csrow * 20;
p_csrow->last_page = 9 + csrow * 20;
p_csrow->page_mask = 0xFFF;
p_csrow->grain = 8;
csrow_megs = 0;
for (channel = 0; channel < pvt->maxch; channel++)
csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
p_csrow->nr_pages = csrow_megs << 8;
/* Assume DDR2 for now */
p_csrow->mtype = MEM_FB_DDR2;
/* ask what device type on this row */
if (MTR_DRAM_WIDTH(mtr))
p_csrow->dtype = DEV_X8;
else
p_csrow->dtype = DEV_X4;
p_csrow->edac_mode = EDAC_S8ECD8ED;
empty = 0;
}
return empty;
}
/*
* i5400_enable_error_reporting
* Turn on the memory reporting features of the hardware
*/
static void i5400_enable_error_reporting(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
u32 fbd_error_mask;
pvt = mci->pvt_info;
/* Read the FBD Error Mask Register */
pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
&fbd_error_mask);
/* Enable with a '0' */
fbd_error_mask &= ~(ENABLE_EMASK_ALL);
pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
fbd_error_mask);
}
/*
* i5400_probe1 Probe for ONE instance of device to see if it is
* present.
* return:
* 0 for FOUND a device
* < 0 for error code
*/
static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
struct i5400_pvt *pvt;
int num_channels;
int num_dimms_per_channel;
int num_csrows;
if (dev_idx >= ARRAY_SIZE(i5400_devs))
return -EINVAL;
debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
__FILE__, __func__,
pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
/* We only are looking for func 0 of the set */
if (PCI_FUNC(pdev->devfn) != 0)
return -ENODEV;
/* As we don't have a motherboard identification routine to determine
* the actual number of slots/DIMMs per channel, we use the maximums
* specified by the chipset. We might therefore report more DIMMs per
* channel than are actually present on the motherboard, but this
* allows the driver to support up to the chipset maximum without any
* fancy motherboard detection.
*/
num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL;
num_channels = MAX_CHANNELS;
num_csrows = num_dimms_per_channel;
debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
__func__, num_channels, num_dimms_per_channel, num_csrows);
/* allocate a new MC control structure */
mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
if (mci == NULL)
return -ENOMEM;
debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
mci->dev = &pdev->dev; /* record ptr to the generic device */
pvt = mci->pvt_info;
pvt->system_address = pdev; /* Record this device in our private */
pvt->maxch = num_channels;
pvt->maxdimmperch = num_dimms_per_channel;
/* 'get' the pci devices we want to reserve for our use */
if (i5400_get_devices(mci, dev_idx))
goto fail0;
/* Time to get serious */
i5400_get_mc_regs(mci); /* retrieve the hardware registers */
mci->mc_idx = 0;
mci->mtype_cap = MEM_FLAG_FB_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i5400_edac.c";
mci->mod_ver = I5400_REVISION;
mci->ctl_name = i5400_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
/* Set the function pointer to an actual operation function */
mci->edac_check = i5400_check_error;
/* initialize the MC control structure 'csrows' table
* with the mapping and control information */
if (i5400_init_csrows(mci)) {
debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
" because i5400_init_csrows() returned nonzero "
"value\n");
mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
} else {
debugf1("MC: Enable error reporting now\n");
i5400_enable_error_reporting(mci);
}
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
__FILE__, __func__);
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
goto fail1;
}
i5400_clear_error(mci);
/* allocating generic PCI control info */
i5400_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!i5400_pci) {
printk(KERN_WARNING
"%s(): Unable to create PCI control\n",
__func__);
printk(KERN_WARNING
"%s(): PCI error report via EDAC not setup\n",
__func__);
}
return 0;
/* Error exit unwinding stack */
fail1:
i5400_put_devices(mci);
fail0:
edac_mc_free(mci);
return -ENODEV;
}
/*
* i5400_init_one constructor for one instance of device
*
* returns:
* negative on error
* count (>= 0)
*/
static int __devinit i5400_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int rc;
debugf0("MC: %s: %s()\n", __FILE__, __func__);
/* wake up device */
rc = pci_enable_device(pdev);
if (rc)
return rc;
/* now probe and enable the device */
return i5400_probe1(pdev, id->driver_data);
}
/*
* i5400_remove_one destructor for one instance of device
*
*/
static void __devexit i5400_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
debugf0("%s: %s()\n", __FILE__, __func__);
if (i5400_pci)
edac_pci_release_generic_ctl(i5400_pci);
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
return;
/* retrieve references to resources, and free those resources */
i5400_put_devices(mci);
edac_mc_free(mci);
}
/*
* pci_device_id table for which devices we are looking for
*
* The 5400 Error Reporting device (PCI_DEVICE_ID_INTEL_5400_ERR) is the device we match on.
*/
static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
{0,} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i5400_pci_tbl);
/*
* i5400_driver pci_driver structure for this module
*
*/
static struct pci_driver i5400_driver = {
.name = "i5400_edac",
.probe = i5400_init_one,
.remove = __devexit_p(i5400_remove_one),
.id_table = i5400_pci_tbl,
};
/*
* i5400_init Module entry function
* Try to initialize this module for its devices
*/
static int __init i5400_init(void)
{
int pci_rc;
debugf2("MC: %s: %s()\n", __FILE__, __func__);
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&i5400_driver);
return (pci_rc < 0) ? pci_rc : 0;
}
/*
* i5400_exit() Module exit function
* Unregister the driver
*/
static void __exit i5400_exit(void)
{
debugf2("MC: %s: %s()\n", __FILE__, __func__);
pci_unregister_driver(&i5400_driver);
}
module_init(i5400_init);
module_exit(i5400_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Woodard <woodard@redhat.com>");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I5400 memory controllers - "
I5400_REVISION);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| gpl-2.0 |
aatjitra/Note2 | drivers/isdn/mISDN/layer2.c | 3028 | 50933 | /*
*
* Author Karsten Keil <kkeil@novell.com>
*
* Copyright 2008 by Karsten Keil <kkeil@novell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/mISDNif.h>
#include <linux/slab.h>
#include "core.h"
#include "fsm.h"
#include "layer2.h"
static u_int *debug;
static
struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};
static char *strL2State[] =
{
"ST_L2_1",
"ST_L2_2",
"ST_L2_3",
"ST_L2_4",
"ST_L2_5",
"ST_L2_6",
"ST_L2_7",
"ST_L2_8",
};
enum {
EV_L2_UI,
EV_L2_SABME,
EV_L2_DISC,
EV_L2_DM,
EV_L2_UA,
EV_L2_FRMR,
EV_L2_SUPER,
EV_L2_I,
EV_L2_DL_DATA,
EV_L2_ACK_PULL,
EV_L2_DL_UNITDATA,
EV_L2_DL_ESTABLISH_REQ,
EV_L2_DL_RELEASE_REQ,
EV_L2_MDL_ASSIGN,
EV_L2_MDL_REMOVE,
EV_L2_MDL_ERROR,
EV_L1_DEACTIVATE,
EV_L2_T200,
EV_L2_T203,
EV_L2_SET_OWN_BUSY,
EV_L2_CLEAR_OWN_BUSY,
EV_L2_FRAME_ERROR,
};
#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)
static char *strL2Event[] =
{
"EV_L2_UI",
"EV_L2_SABME",
"EV_L2_DISC",
"EV_L2_DM",
"EV_L2_UA",
"EV_L2_FRMR",
"EV_L2_SUPER",
"EV_L2_I",
"EV_L2_DL_DATA",
"EV_L2_ACK_PULL",
"EV_L2_DL_UNITDATA",
"EV_L2_DL_ESTABLISH_REQ",
"EV_L2_DL_RELEASE_REQ",
"EV_L2_MDL_ASSIGN",
"EV_L2_MDL_REMOVE",
"EV_L2_MDL_ERROR",
"EV_L1_DEACTIVATE",
"EV_L2_T200",
"EV_L2_T203",
"EV_L2_SET_OWN_BUSY",
"EV_L2_CLEAR_OWN_BUSY",
"EV_L2_FRAME_ERROR",
};
static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
struct layer2 *l2 = fi->userdata;
struct va_format vaf;
va_list va;
if (!(*debug & DEBUG_L2_FSM))
return;
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n",
l2->sapi, l2->tei, &vaf);
va_end(va);
}
inline u_int
l2headersize(struct layer2 *l2, int ui)
{
return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
(test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
}
inline u_int
l2addrsize(struct layer2 *l2)
{
return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
}
static u_int
l2_newid(struct layer2 *l2)
{
u_int id;
id = l2->next_id++;
if (id == 0x7fff)
l2->next_id = 1;
id <<= 16;
id |= l2->tei << 8;
id |= l2->sapi;
return id;
}
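/*
* Example (illustrative values): with next_id = 5, tei = 64 and sapi = 0
* the id handed down is (5 << 16) | (64 << 8) | 0 = 0x00054000.
*/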
static void
l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
{
int err;
if (!l2->up)
return;
mISDN_HEAD_PRIM(skb) = prim;
mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
err = l2->up->send(l2->up, skb);
if (err) {
printk(KERN_WARNING "%s: err=%d\n", __func__, err);
dev_kfree_skb(skb);
}
}
static void
l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
{
struct sk_buff *skb;
struct mISDNhead *hh;
int err;
if (!l2->up)
return;
skb = mI_alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
hh = mISDN_HEAD_P(skb);
hh->prim = prim;
hh->id = (l2->ch.nr << 16) | l2->ch.addr;
if (len)
memcpy(skb_put(skb, len), arg, len);
err = l2->up->send(l2->up, skb);
if (err) {
printk(KERN_WARNING "%s: err=%d\n", __func__, err);
dev_kfree_skb(skb);
}
}
static int
l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
int ret;
ret = l2->ch.recv(l2->ch.peer, skb);
if (ret && (*debug & DEBUG_L2_RECV))
printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
return ret;
}
static int
l2down_raw(struct layer2 *l2, struct sk_buff *skb)
{
struct mISDNhead *hh = mISDN_HEAD_P(skb);
if (hh->prim == PH_DATA_REQ) {
if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
skb_queue_tail(&l2->down_queue, skb);
return 0;
}
l2->down_id = mISDN_HEAD_ID(skb);
}
return l2down_skb(l2, skb);
}
static int
l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
{
struct mISDNhead *hh = mISDN_HEAD_P(skb);
hh->prim = prim;
hh->id = id;
return l2down_raw(l2, skb);
}
static int
l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
{
struct sk_buff *skb;
int err;
struct mISDNhead *hh;
skb = mI_alloc_skb(len, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
hh = mISDN_HEAD_P(skb);
hh->prim = prim;
hh->id = id;
if (len)
memcpy(skb_put(skb, len), arg, len);
err = l2down_raw(l2, skb);
if (err)
dev_kfree_skb(skb);
return err;
}
static int
ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
struct sk_buff *nskb = skb;
int ret = -EAGAIN;
if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
if (hh->id == l2->down_id) {
nskb = skb_dequeue(&l2->down_queue);
if (nskb) {
l2->down_id = mISDN_HEAD_ID(nskb);
if (l2down_skb(l2, nskb)) {
dev_kfree_skb(nskb);
l2->down_id = MISDN_ID_NONE;
}
} else
l2->down_id = MISDN_ID_NONE;
if (ret) {
dev_kfree_skb(skb);
ret = 0;
}
if (l2->down_id == MISDN_ID_NONE) {
test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
}
}
}
if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
nskb = skb_dequeue(&l2->down_queue);
if (nskb) {
l2->down_id = mISDN_HEAD_ID(nskb);
if (l2down_skb(l2, nskb)) {
dev_kfree_skb(nskb);
l2->down_id = MISDN_ID_NONE;
test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
}
} else
test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
}
return ret;
}
static int
l2mgr(struct layer2 *l2, u_int prim, void *arg) {
long c = (long)arg;
printk(KERN_WARNING
"l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
if (test_bit(FLG_LAPD, &l2->flag) &&
!test_bit(FLG_FIXED_TEI, &l2->flag)) {
switch (c) {
case 'C':
case 'D':
case 'G':
case 'H':
l2_tei(l2, prim, (u_long)arg);
break;
}
}
return 0;
}
static void
set_peer_busy(struct layer2 *l2) {
test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
test_and_set_bit(FLG_L2BLOCK, &l2->flag);
}
static void
clear_peer_busy(struct layer2 *l2) {
if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
}
static void
InitWin(struct layer2 *l2)
{
int i;
for (i = 0; i < MAX_WINDOW; i++)
l2->windowar[i] = NULL;
}
static int
freewin(struct layer2 *l2)
{
int i, cnt = 0;
for (i = 0; i < MAX_WINDOW; i++) {
if (l2->windowar[i]) {
cnt++;
dev_kfree_skb(l2->windowar[i]);
l2->windowar[i] = NULL;
}
}
return cnt;
}
static void
ReleaseWin(struct layer2 *l2)
{
int cnt = freewin(l2);
if (cnt)
printk(KERN_WARNING
"isdnl2 freed %d skbuffs in release\n", cnt);
}
inline unsigned int
cansend(struct layer2 *l2)
{
unsigned int p1;
if (test_bit(FLG_MOD128, &l2->flag))
p1 = (l2->vs - l2->va) % 128;
else
p1 = (l2->vs - l2->va) % 8;
return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
}
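/*
* Sliding-window check: p1 counts the I-frames sent but not yet
* acknowledged. For example, with vs = 5, va = 2 and window = 3,
* p1 = 3 is not < 3, so no further I-frame may go out until an
* acknowledgement advances va.
*/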
inline void
clear_exception(struct layer2 *l2)
{
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
test_and_clear_bit(FLG_REJEXC, &l2->flag);
test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
clear_peer_busy(l2);
}
static int
sethdraddr(struct layer2 *l2, u_char *header, int rsp)
{
u_char *ptr = header;
int crbit = rsp;
if (test_bit(FLG_LAPD, &l2->flag)) {
if (test_bit(FLG_LAPD_NET, &l2->flag))
crbit = !crbit;
*ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
*ptr++ = (l2->tei << 1) | 1;
return 2;
} else {
if (test_bit(FLG_ORIG, &l2->flag))
crbit = !crbit;
if (crbit)
*ptr++ = l2->addr.B;
else
*ptr++ = l2->addr.A;
return 1;
}
}
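/*
* LAPD example (illustrative values): for sapi = 0, tei = 64 on the
* user side, a command frame (rsp == 0) gets the two address octets
* 0x00 (sapi << 2, C/R = 0, EA = 0) and 0x81 ((tei << 1) | 1).
*/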
static inline void
enqueue_super(struct layer2 *l2, struct sk_buff *skb)
{
if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
dev_kfree_skb(skb);
}
static inline void
enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
{
if (l2->tm)
l2_tei(l2, MDL_STATUS_UI_IND, 0);
if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
dev_kfree_skb(skb);
}
inline int
IsUI(u_char *data)
{
return (data[0] & 0xef) == UI;
}
inline int
IsUA(u_char *data)
{
return (data[0] & 0xef) == UA;
}
inline int
IsDM(u_char *data)
{
return (data[0] & 0xef) == DM;
}
inline int
IsDISC(u_char *data)
{
return (data[0] & 0xef) == DISC;
}
inline int
IsRR(u_char *data, struct layer2 *l2)
{
if (test_bit(FLG_MOD128, &l2->flag))
return data[0] == RR;
else
return (data[0] & 0xf) == 1;
}
inline int
IsSFrame(u_char *data, struct layer2 *l2)
{
register u_char d = *data;
if (!test_bit(FLG_MOD128, &l2->flag))
d &= 0xf;
return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
}
inline int
IsSABME(u_char *data, struct layer2 *l2)
{
u_char d = data[0] & ~0x10;
return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
}
inline int
IsREJ(u_char *data, struct layer2 *l2)
{
return test_bit(FLG_MOD128, &l2->flag) ?
data[0] == REJ : (data[0] & 0xf) == REJ;
}
inline int
IsFRMR(u_char *data)
{
return (data[0] & 0xef) == FRMR;
}
inline int
IsRNR(u_char *data, struct layer2 *l2)
{
return test_bit(FLG_MOD128, &l2->flag) ?
data[0] == RNR : (data[0] & 0xf) == RNR;
}
static int
iframe_error(struct layer2 *l2, struct sk_buff *skb)
{
u_int i;
int rsp = *skb->data & 0x2;
i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
if (test_bit(FLG_ORIG, &l2->flag))
rsp = !rsp;
if (rsp)
return 'L';
if (skb->len < i)
return 'N';
if ((skb->len - i) > l2->maxlen)
return 'O';
return 0;
}
static int
super_error(struct layer2 *l2, struct sk_buff *skb)
{
if (skb->len != l2addrsize(l2) +
(test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
return 'N';
return 0;
}
static int
unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
{
int rsp = (*skb->data & 0x2) >> 1;
if (test_bit(FLG_ORIG, &l2->flag))
rsp = !rsp;
if (rsp != wantrsp)
return 'L';
if (skb->len != l2addrsize(l2) + 1)
return 'N';
return 0;
}
static int
UI_error(struct layer2 *l2, struct sk_buff *skb)
{
int rsp = *skb->data & 0x2;
if (test_bit(FLG_ORIG, &l2->flag))
rsp = !rsp;
if (rsp)
return 'L';
if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
return 'O';
return 0;
}
static int
FRMR_error(struct layer2 *l2, struct sk_buff *skb)
{
u_int headers = l2addrsize(l2) + 1;
u_char *datap = skb->data + headers;
int rsp = *skb->data & 0x2;
if (test_bit(FLG_ORIG, &l2->flag))
rsp = !rsp;
if (!rsp)
return 'L';
if (test_bit(FLG_MOD128, &l2->flag)) {
if (skb->len < headers + 5)
return 'N';
else if (*debug & DEBUG_L2)
l2m_debug(&l2->l2m,
"FRMR information %2x %2x %2x %2x %2x",
datap[0], datap[1], datap[2], datap[3], datap[4]);
} else {
if (skb->len < headers + 3)
return 'N';
else if (*debug & DEBUG_L2)
l2m_debug(&l2->l2m,
"FRMR information %2x %2x %2x",
datap[0], datap[1], datap[2]);
}
return 0;
}
static unsigned int
legalnr(struct layer2 *l2, unsigned int nr)
{
if (test_bit(FLG_MOD128, &l2->flag))
return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
else
return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
}
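/*
* A received N(R) is legal if it falls inside the range of currently
* outstanding frames, e.g. (modulo 8) with va = 2 and vs = 5 the legal
* values are 2, 3, 4 and 5.
*/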
static void
setva(struct layer2 *l2, unsigned int nr)
{
struct sk_buff *skb;
while (l2->va != nr) {
l2->va++;
if (test_bit(FLG_MOD128, &l2->flag))
l2->va %= 128;
else
l2->va %= 8;
if (l2->windowar[l2->sow]) {
skb_trim(l2->windowar[l2->sow], 0);
skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
l2->windowar[l2->sow] = NULL;
}
l2->sow = (l2->sow + 1) % l2->window;
}
skb = skb_dequeue(&l2->tmp_queue);
while (skb) {
dev_kfree_skb(skb);
skb = skb_dequeue(&l2->tmp_queue);
}
}
static void
send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
{
u_char tmp[MAX_L2HEADER_LEN];
int i;
i = sethdraddr(l2, tmp, cr);
tmp[i++] = cmd;
if (skb)
skb_trim(skb, 0);
else {
skb = mI_alloc_skb(i, GFP_ATOMIC);
if (!skb) {
printk(KERN_WARNING "%s: can't alloc skbuff\n",
__func__);
return;
}
}
memcpy(skb_put(skb, i), tmp, i);
enqueue_super(l2, skb);
}
inline u_char
get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
{
return skb->data[l2addrsize(l2)] & 0x10;
}
inline u_char
get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
{
u_char PF;
PF = get_PollFlag(l2, skb);
dev_kfree_skb(skb);
return PF;
}
inline void
start_t200(struct layer2 *l2, int i)
{
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
test_and_set_bit(FLG_T200_RUN, &l2->flag);
}
inline void
restart_t200(struct layer2 *l2, int i)
{
mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
test_and_set_bit(FLG_T200_RUN, &l2->flag);
}
inline void
stop_t200(struct layer2 *l2, int i)
{
if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
mISDN_FsmDelTimer(&l2->t200, i);
}
inline void
st5_dl_release_l2l3(struct layer2 *l2)
{
int pr;
if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
pr = DL_RELEASE_CNF;
else
pr = DL_RELEASE_IND;
l2up_create(l2, pr, 0, NULL);
}
inline void
lapb_dl_release_l2l3(struct layer2 *l2, int f)
{
if (test_bit(FLG_LAPB, &l2->flag))
l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
l2up_create(l2, f, 0, NULL);
}
static void
establishlink(struct FsmInst *fi)
{
struct layer2 *l2 = fi->userdata;
u_char cmd;
clear_exception(l2);
l2->rc = 0;
cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
send_uframe(l2, NULL, cmd, CMD);
mISDN_FsmDelTimer(&l2->t203, 1);
restart_t200(l2, 1);
test_and_clear_bit(FLG_PEND_REL, &l2->flag);
freewin(l2);
mISDN_FsmChangeState(fi, ST_L2_5);
}
static void
l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
if (get_PollFlagFree(l2, skb))
l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
else
l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
}
static void
l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
if (get_PollFlagFree(l2, skb))
l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
else {
l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
establishlink(fi);
test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
}
static void
l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
if (get_PollFlagFree(l2, skb))
l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
else
l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
establishlink(fi);
test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
static void
l2_go_st3(struct FsmInst *fi, int event, void *arg)
{
dev_kfree_skb((struct sk_buff *)arg);
mISDN_FsmChangeState(fi, ST_L2_3);
}
static void
l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
mISDN_FsmChangeState(fi, ST_L2_3);
dev_kfree_skb((struct sk_buff *)arg);
l2_tei(l2, MDL_ASSIGN_IND, 0);
}
static void
l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_tail(&l2->ui_queue, skb);
mISDN_FsmChangeState(fi, ST_L2_2);
l2_tei(l2, MDL_ASSIGN_IND, 0);
}
static void
l2_queue_ui(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_tail(&l2->ui_queue, skb);
}
static void
tx_ui(struct layer2 *l2)
{
struct sk_buff *skb;
u_char header[MAX_L2HEADER_LEN];
int i;
i = sethdraddr(l2, header, CMD);
if (test_bit(FLG_LAPD_NET, &l2->flag))
header[1] = 0xff; /* tei 127 */
header[i++] = UI;
while ((skb = skb_dequeue(&l2->ui_queue))) {
memcpy(skb_push(skb, i), header, i);
enqueue_ui(l2, skb);
}
}
static void
l2_send_ui(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_tail(&l2->ui_queue, skb);
tx_ui(l2);
}
static void
l2_got_ui(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_pull(skb, l2headersize(l2, 1));
/*
* in states 1-3 for broadcast
*/
if (l2->tm)
l2_tei(l2, MDL_STATUS_UI_IND, 0);
l2up(l2, DL_UNITDATA_IND, skb);
}
static void
l2_establish(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
establishlink(fi);
test_and_set_bit(FLG_L3_INIT, &l2->flag);
dev_kfree_skb(skb);
}
static void
l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->i_queue);
test_and_set_bit(FLG_L3_INIT, &l2->flag);
test_and_clear_bit(FLG_PEND_REL, &l2->flag);
dev_kfree_skb(skb);
}
static void
l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->i_queue);
establishlink(fi);
test_and_set_bit(FLG_L3_INIT, &l2->flag);
dev_kfree_skb(skb);
}
static void
l2_release(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_trim(skb, 0);
l2up(l2, DL_RELEASE_CNF, skb);
}
static void
l2_pend_rel(struct FsmInst *fi, int event, void *arg)
{
struct sk_buff *skb = arg;
struct layer2 *l2 = fi->userdata;
test_and_set_bit(FLG_PEND_REL, &l2->flag);
dev_kfree_skb(skb);
}
static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_purge(&l2->i_queue);
freewin(l2);
mISDN_FsmChangeState(fi, ST_L2_6);
l2->rc = 0;
send_uframe(l2, NULL, DISC | 0x10, CMD);
mISDN_FsmDelTimer(&l2->t203, 1);
restart_t200(l2, 2);
if (skb)
dev_kfree_skb(skb);
}
static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
l2->vs = 0;
l2->va = 0;
l2->vr = 0;
l2->sow = 0;
clear_exception(l2);
send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
mISDN_FsmChangeState(fi, ST_L2_7);
mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
skb_trim(skb, 0);
l2up(l2, DL_ESTABLISH_IND, skb);
if (l2->tm)
l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
static void
l2_send_UA(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
}
static void
l2_send_DM(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
}
static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
int est = 0;
send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
l2mgr(l2, MDL_ERROR_IND, (void *) 'F');
if (l2->vs != l2->va) {
skb_queue_purge(&l2->i_queue);
est = 1;
}
clear_exception(l2);
l2->vs = 0;
l2->va = 0;
l2->vr = 0;
l2->sow = 0;
mISDN_FsmChangeState(fi, ST_L2_7);
stop_t200(l2, 3);
mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
if (est)
l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
/* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
* MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
* 0, NULL, 0);
*/
if (skb_queue_len(&l2->i_queue) && cansend(l2))
mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}
static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
mISDN_FsmChangeState(fi, ST_L2_4);
mISDN_FsmDelTimer(&l2->t203, 3);
stop_t200(l2, 4);
send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
skb_queue_purge(&l2->i_queue);
freewin(l2);
lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
int pr = -1;
if (!get_PollFlag(l2, skb)) {
l2_mdl_error_ua(fi, event, arg);
return;
}
dev_kfree_skb(skb);
if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
l2_disconnect(fi, event, NULL);
if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
pr = DL_ESTABLISH_CNF;
} else if (l2->vs != l2->va) {
skb_queue_purge(&l2->i_queue);
pr = DL_ESTABLISH_IND;
}
stop_t200(l2, 5);
l2->vr = 0;
l2->vs = 0;
l2->va = 0;
l2->sow = 0;
mISDN_FsmChangeState(fi, ST_L2_7);
mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
if (pr != -1)
l2up_create(l2, pr, 0, NULL);
if (skb_queue_len(&l2->i_queue) && cansend(l2))
mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
if (l2->tm)
l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
static void
l2_released(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (!get_PollFlag(l2, skb)) {
l2_mdl_error_ua(fi, event, arg);
return;
}
dev_kfree_skb(skb);
stop_t200(l2, 6);
lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
mISDN_FsmChangeState(fi, ST_L2_4);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
static void
l2_reestablish(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (!get_PollFlagFree(l2, skb)) {
establishlink(fi);
test_and_set_bit(FLG_L3_INIT, &l2->flag);
}
}
static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (get_PollFlagFree(l2, skb)) {
stop_t200(l2, 7);
if (!test_bit(FLG_L3_INIT, &l2->flag))
skb_queue_purge(&l2->i_queue);
if (test_bit(FLG_LAPB, &l2->flag))
l2down_create(l2, PH_DEACTIVATE_REQ,
l2_newid(l2), 0, NULL);
st5_dl_release_l2l3(l2);
mISDN_FsmChangeState(fi, ST_L2_4);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
}
static void
l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (get_PollFlagFree(l2, skb)) {
stop_t200(l2, 8);
lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
mISDN_FsmChangeState(fi, ST_L2_4);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
}
static void
enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
{
struct sk_buff *skb;
u_char tmp[MAX_L2HEADER_LEN];
int i;
i = sethdraddr(l2, tmp, cr);
if (test_bit(FLG_MOD128, &l2->flag)) {
tmp[i++] = typ;
tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
} else
tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
skb = mI_alloc_skb(i, GFP_ATOMIC);
if (!skb) {
printk(KERN_WARNING
"isdnl2 can't alloc skbuff for enquiry_cr\n");
return;
}
memcpy(skb_put(skb, i), tmp, i);
enqueue_super(l2, skb);
}
inline void
enquiry_response(struct layer2 *l2)
{
if (test_bit(FLG_OWN_BUSY, &l2->flag))
enquiry_cr(l2, RNR, RSP, 1);
else
enquiry_cr(l2, RR, RSP, 1);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
}
inline void
transmit_enquiry(struct layer2 *l2)
{
if (test_bit(FLG_OWN_BUSY, &l2->flag))
enquiry_cr(l2, RNR, CMD, 1);
else
enquiry_cr(l2, RR, CMD, 1);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
start_t200(l2, 9);
}
static void
nrerrorrecovery(struct FsmInst *fi)
{
struct layer2 *l2 = fi->userdata;
l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
establishlink(fi);
test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
static void
invoke_retransmission(struct layer2 *l2, unsigned int nr)
{
u_int p1;
if (l2->vs != nr) {
while (l2->vs != nr) {
(l2->vs)--;
if (test_bit(FLG_MOD128, &l2->flag)) {
l2->vs %= 128;
p1 = (l2->vs - l2->va) % 128;
} else {
l2->vs %= 8;
p1 = (l2->vs - l2->va) % 8;
}
p1 = (p1 + l2->sow) % l2->window;
if (l2->windowar[p1])
skb_queue_head(&l2->i_queue, l2->windowar[p1]);
else
printk(KERN_WARNING
"%s: windowar[%d] is NULL\n",
__func__, p1);
l2->windowar[p1] = NULL;
}
mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
}
}
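/*
* Note: on REJ (and similar recovery) the transmitter rolls v(s) back to
* the acknowledged n(r) and re-queues the frames still held in windowar[]
* at the head of the I-queue, so they are retransmitted in order.
*/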
static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
int PollFlag, rsp, typ = RR;
unsigned int nr;
rsp = *skb->data & 0x2;
if (test_bit(FLG_ORIG, &l2->flag))
rsp = !rsp;
skb_pull(skb, l2addrsize(l2));
if (IsRNR(skb->data, l2)) {
set_peer_busy(l2);
typ = RNR;
} else
clear_peer_busy(l2);
if (IsREJ(skb->data, l2))
typ = REJ;
if (test_bit(FLG_MOD128, &l2->flag)) {
PollFlag = (skb->data[1] & 0x1) == 0x1;
nr = skb->data[1] >> 1;
} else {
PollFlag = (skb->data[0] & 0x10);
nr = (skb->data[0] >> 5) & 0x7;
}
dev_kfree_skb(skb);
if (PollFlag) {
if (rsp)
l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
else
enquiry_response(l2);
}
if (legalnr(l2, nr)) {
if (typ == REJ) {
setva(l2, nr);
invoke_retransmission(l2, nr);
stop_t200(l2, 10);
if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
EV_L2_T203, NULL, 6))
l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
} else if ((nr == l2->vs) && (typ == RR)) {
setva(l2, nr);
stop_t200(l2, 11);
mISDN_FsmRestartTimer(&l2->t203, l2->T203,
EV_L2_T203, NULL, 7);
} else if ((l2->va != nr) || (typ == RNR)) {
setva(l2, nr);
if (typ != RR)
mISDN_FsmDelTimer(&l2->t203, 9);
restart_t200(l2, 12);
}
if (skb_queue_len(&l2->i_queue) && (typ == RR))
mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
} else
nrerrorrecovery(fi);
}
static void
l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (!test_bit(FLG_L3_INIT, &l2->flag))
skb_queue_tail(&l2->i_queue, skb);
else
dev_kfree_skb(skb);
}
static void
l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_tail(&l2->i_queue, skb);
mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}
static void
l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_tail(&l2->i_queue, skb);
}
static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
int PollFlag, i;
u_int ns, nr;
i = l2addrsize(l2);
if (test_bit(FLG_MOD128, &l2->flag)) {
PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
ns = skb->data[i] >> 1;
nr = (skb->data[i + 1] >> 1) & 0x7f;
} else {
PollFlag = (skb->data[i] & 0x10);
ns = (skb->data[i] >> 1) & 0x7;
nr = (skb->data[i] >> 5) & 0x7;
}
if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
dev_kfree_skb(skb);
if (PollFlag)
enquiry_response(l2);
} else {
if (l2->vr == ns) {
l2->vr++;
if (test_bit(FLG_MOD128, &l2->flag))
l2->vr %= 128;
else
l2->vr %= 8;
test_and_clear_bit(FLG_REJEXC, &l2->flag);
if (PollFlag)
enquiry_response(l2);
else
test_and_set_bit(FLG_ACK_PEND, &l2->flag);
skb_pull(skb, l2headersize(l2, 0));
l2up(l2, DL_DATA_IND, skb);
} else {
/* n(s)!=v(r) */
dev_kfree_skb(skb);
if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
if (PollFlag)
enquiry_response(l2);
} else {
enquiry_cr(l2, REJ, RSP, PollFlag);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
}
}
}
if (legalnr(l2, nr)) {
if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
(fi->state == ST_L2_7)) {
if (nr == l2->vs) {
stop_t200(l2, 13);
mISDN_FsmRestartTimer(&l2->t203, l2->T203,
EV_L2_T203, NULL, 7);
} else if (nr != l2->va)
restart_t200(l2, 14);
}
setva(l2, nr);
} else {
nrerrorrecovery(fi);
return;
}
if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
enquiry_cr(l2, RR, RSP, 0);
}
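/*
 * Summary of the receive path above: a frame arriving while FLG_OWN_BUSY
 * is set is dropped and only a poll is answered; an in-sequence frame
 * (ns == vr) advances v(r), is handed up via DL_DATA_IND and either
 * answers the poll or leaves FLG_ACK_PEND set so an RR response goes out
 * at the end of the handler; an out-of-sequence frame is dropped and
 * answered with a single REJ, FLG_REJEXC preventing a REJ per following
 * frame.  The piggybacked n(r) then acknowledges our own outstanding
 * I-frames just as in the supervisory-frame handlers.
 */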
static void
l2_got_tei(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
u_int info;
l2->tei = (signed char)(long)arg;
set_channel_address(&l2->ch, l2->sapi, l2->tei);
info = DL_INFO_L2_CONNECT;
l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
if (fi->state == ST_L2_3) {
establishlink(fi);
test_and_set_bit(FLG_L3_INIT, &l2->flag);
} else
mISDN_FsmChangeState(fi, ST_L2_4);
if (skb_queue_len(&l2->ui_queue))
tx_ui(l2);
}
static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
if (test_bit(FLG_LAPD, &l2->flag) &&
test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
} else if (l2->rc == l2->N200) {
mISDN_FsmChangeState(fi, ST_L2_4);
test_and_clear_bit(FLG_T200_RUN, &l2->flag);
skb_queue_purge(&l2->i_queue);
l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
if (test_bit(FLG_LAPB, &l2->flag))
l2down_create(l2, PH_DEACTIVATE_REQ,
l2_newid(l2), 0, NULL);
st5_dl_release_l2l3(l2);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
} else {
l2->rc++;
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
SABME : SABM) | 0x10, CMD);
}
}
static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
if (test_bit(FLG_LAPD, &l2->flag) &&
test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
} else if (l2->rc == l2->N200) {
mISDN_FsmChangeState(fi, ST_L2_4);
test_and_clear_bit(FLG_T200_RUN, &l2->flag);
l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
} else {
l2->rc++;
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
NULL, 9);
send_uframe(l2, NULL, DISC | 0x10, CMD);
}
}
static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
if (test_bit(FLG_LAPD, &l2->flag) &&
test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
return;
}
test_and_clear_bit(FLG_T200_RUN, &l2->flag);
l2->rc = 0;
mISDN_FsmChangeState(fi, ST_L2_8);
transmit_enquiry(l2);
l2->rc++;
}
static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
if (test_bit(FLG_LAPD, &l2->flag) &&
test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
return;
}
test_and_clear_bit(FLG_T200_RUN, &l2->flag);
if (l2->rc == l2->N200) {
l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
establishlink(fi);
test_and_clear_bit(FLG_L3_INIT, &l2->flag);
} else {
transmit_enquiry(l2);
l2->rc++;
}
}
static void
l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
if (test_bit(FLG_LAPD, &l2->flag) &&
test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
return;
}
mISDN_FsmChangeState(fi, ST_L2_8);
transmit_enquiry(l2);
l2->rc = 0;
}
static void
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb, *nskb, *oskb;
u_char header[MAX_L2HEADER_LEN];
u_int i, p1;
if (!cansend(l2))
return;
skb = skb_dequeue(&l2->i_queue);
if (!skb)
return;
if (test_bit(FLG_MOD128, &l2->flag))
p1 = (l2->vs - l2->va) % 128;
else
p1 = (l2->vs - l2->va) % 8;
p1 = (p1 + l2->sow) % l2->window;
if (l2->windowar[p1]) {
printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
p1);
dev_kfree_skb(l2->windowar[p1]);
}
l2->windowar[p1] = skb;
i = sethdraddr(l2, header, CMD);
if (test_bit(FLG_MOD128, &l2->flag)) {
header[i++] = l2->vs << 1;
header[i++] = l2->vr << 1;
l2->vs = (l2->vs + 1) % 128;
} else {
header[i++] = (l2->vr << 5) | (l2->vs << 1);
l2->vs = (l2->vs + 1) % 8;
}
nskb = skb_clone(skb, GFP_ATOMIC);
p1 = skb_headroom(nskb);
if (p1 >= i)
memcpy(skb_push(nskb, i), header, i);
else {
printk(KERN_WARNING
"isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
oskb = nskb;
nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
if (!nskb) {
dev_kfree_skb(oskb);
printk(KERN_WARNING "%s: no skb mem\n", __func__);
return;
}
memcpy(skb_put(nskb, i), header, i);
memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
dev_kfree_skb(oskb);
}
l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
mISDN_FsmDelTimer(&l2->t203, 13);
mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
}
}
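/*
 * Control-field layout built in header[] above: with modulo-128
 * numbering the I-frame control field takes two octets, n(s) << 1 in
 * the first and n(r) << 1 in the second; with modulo-8 it is a single
 * octet holding (n(r) << 5) | (n(s) << 1).  In both cases the least
 * significant bit of the first control octet stays 0, which is what
 * ph_data_indication() later uses to recognize an I-frame.
 */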
static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
int PollFlag, rsp, rnr = 0;
unsigned int nr;
rsp = *skb->data & 0x2;
if (test_bit(FLG_ORIG, &l2->flag))
rsp = !rsp;
skb_pull(skb, l2addrsize(l2));
if (IsRNR(skb->data, l2)) {
set_peer_busy(l2);
rnr = 1;
} else
clear_peer_busy(l2);
if (test_bit(FLG_MOD128, &l2->flag)) {
PollFlag = (skb->data[1] & 0x1) == 0x1;
nr = skb->data[1] >> 1;
} else {
PollFlag = (skb->data[0] & 0x10);
nr = (skb->data[0] >> 5) & 0x7;
}
dev_kfree_skb(skb);
if (rsp && PollFlag) {
if (legalnr(l2, nr)) {
if (rnr) {
restart_t200(l2, 15);
} else {
stop_t200(l2, 16);
mISDN_FsmAddTimer(&l2->t203, l2->T203,
EV_L2_T203, NULL, 5);
setva(l2, nr);
}
invoke_retransmission(l2, nr);
mISDN_FsmChangeState(fi, ST_L2_7);
if (skb_queue_len(&l2->i_queue) && cansend(l2))
mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
} else
nrerrorrecovery(fi);
} else {
if (!rsp && PollFlag)
enquiry_response(l2);
if (legalnr(l2, nr))
setva(l2, nr);
else
nrerrorrecovery(fi);
}
}
static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_pull(skb, l2addrsize(l2) + 1);
if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
(IsUA(skb->data) && (fi->state == ST_L2_7))) {
l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
establishlink(fi);
test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
dev_kfree_skb(skb);
}
static void
l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->ui_queue);
l2->tei = GROUP_TEI;
mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->ui_queue);
l2->tei = GROUP_TEI;
l2up_create(l2, DL_RELEASE_IND, 0, NULL);
mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->i_queue);
skb_queue_purge(&l2->ui_queue);
freewin(l2);
l2->tei = GROUP_TEI;
stop_t200(l2, 17);
st5_dl_release_l2l3(l2);
mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->ui_queue);
l2->tei = GROUP_TEI;
stop_t200(l2, 18);
l2up_create(l2, DL_RELEASE_IND, 0, NULL);
mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
skb_queue_purge(&l2->i_queue);
skb_queue_purge(&l2->ui_queue);
freewin(l2);
l2->tei = GROUP_TEI;
stop_t200(l2, 17);
mISDN_FsmDelTimer(&l2->t203, 19);
l2up_create(l2, DL_RELEASE_IND, 0, NULL);
/* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
* MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
* 0, NULL, 0);
*/
mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_purge(&l2->i_queue);
skb_queue_purge(&l2->ui_queue);
if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
l2up(l2, DL_RELEASE_IND, skb);
else
dev_kfree_skb(skb);
}
static void
l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_purge(&l2->i_queue);
skb_queue_purge(&l2->ui_queue);
freewin(l2);
stop_t200(l2, 19);
st5_dl_release_l2l3(l2);
mISDN_FsmChangeState(fi, ST_L2_4);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
dev_kfree_skb(skb);
}
static void
l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_purge(&l2->ui_queue);
stop_t200(l2, 20);
l2up(l2, DL_RELEASE_CNF, skb);
mISDN_FsmChangeState(fi, ST_L2_4);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
static void
l2_persistent_da(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
skb_queue_purge(&l2->i_queue);
skb_queue_purge(&l2->ui_queue);
freewin(l2);
stop_t200(l2, 19);
mISDN_FsmDelTimer(&l2->t203, 19);
l2up(l2, DL_RELEASE_IND, skb);
mISDN_FsmChangeState(fi, ST_L2_4);
if (l2->tm)
l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
static void
l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
enquiry_cr(l2, RNR, RSP, 0);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
}
if (skb)
dev_kfree_skb(skb);
}
static void
l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
struct sk_buff *skb = arg;
if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
enquiry_cr(l2, RR, RSP, 0);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
}
if (skb)
dev_kfree_skb(skb);
}
static void
l2_frame_error(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
l2mgr(l2, MDL_ERROR_IND, arg);
}
static void
l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
{
struct layer2 *l2 = fi->userdata;
l2mgr(l2, MDL_ERROR_IND, arg);
establishlink(fi);
test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
static struct FsmNode L2FnList[] =
{
{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
{ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
{ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
{ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
{ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
{ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
{ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
{ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
{ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
{ST_L2_4, EV_L2_SABME, l2_start_multi},
{ST_L2_5, EV_L2_SABME, l2_send_UA},
{ST_L2_6, EV_L2_SABME, l2_send_DM},
{ST_L2_7, EV_L2_SABME, l2_restart_multi},
{ST_L2_8, EV_L2_SABME, l2_restart_multi},
{ST_L2_4, EV_L2_DISC, l2_send_DM},
{ST_L2_5, EV_L2_DISC, l2_send_DM},
{ST_L2_6, EV_L2_DISC, l2_send_UA},
{ST_L2_7, EV_L2_DISC, l2_stop_multi},
{ST_L2_8, EV_L2_DISC, l2_stop_multi},
{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
{ST_L2_5, EV_L2_UA, l2_connected},
{ST_L2_6, EV_L2_UA, l2_released},
{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
{ST_L2_4, EV_L2_DM, l2_reestablish},
{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
{ST_L2_1, EV_L2_UI, l2_got_ui},
{ST_L2_2, EV_L2_UI, l2_got_ui},
{ST_L2_3, EV_L2_UI, l2_got_ui},
{ST_L2_4, EV_L2_UI, l2_got_ui},
{ST_L2_5, EV_L2_UI, l2_got_ui},
{ST_L2_6, EV_L2_UI, l2_got_ui},
{ST_L2_7, EV_L2_UI, l2_got_ui},
{ST_L2_8, EV_L2_UI, l2_got_ui},
{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
{ST_L2_7, EV_L2_I, l2_got_iframe},
{ST_L2_8, EV_L2_I, l2_got_iframe},
{ST_L2_5, EV_L2_T200, l2_st5_tout_200},
{ST_L2_6, EV_L2_T200, l2_st6_tout_200},
{ST_L2_7, EV_L2_T200, l2_st7_tout_200},
{ST_L2_8, EV_L2_T200, l2_st8_tout_200},
{ST_L2_7, EV_L2_T203, l2_st7_tout_203},
{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
{ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
{ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
};
static int
ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
{
u_char *datap = skb->data;
int ret = -EINVAL;
int psapi, ptei;
u_int l;
int c = 0;
l = l2addrsize(l2);
if (skb->len <= l) {
mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
return ret;
}
if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
psapi = *datap++;
ptei = *datap++;
if ((psapi & 1) || !(ptei & 1)) {
printk(KERN_WARNING
"l2 D-channel frame wrong EA0/EA1\n");
return ret;
}
psapi >>= 2;
ptei >>= 1;
if (psapi != l2->sapi) {
/* not our business */
if (*debug & DEBUG_L2)
printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
__func__, psapi, l2->sapi);
dev_kfree_skb(skb);
return 0;
}
if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
/* not our business */
if (*debug & DEBUG_L2)
printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
__func__, ptei, l2->tei);
dev_kfree_skb(skb);
return 0;
}
} else
datap += l;
if (!(*datap & 1)) { /* I-Frame */
c = iframe_error(l2, skb);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
} else if (IsSFrame(datap, l2)) { /* S-Frame */
c = super_error(l2, skb);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
} else if (IsUI(datap)) {
c = UI_error(l2, skb);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
} else if (IsSABME(datap, l2)) {
c = unnum_error(l2, skb, CMD);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
} else if (IsUA(datap)) {
c = unnum_error(l2, skb, RSP);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
} else if (IsDISC(datap)) {
c = unnum_error(l2, skb, CMD);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
} else if (IsDM(datap)) {
c = unnum_error(l2, skb, RSP);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
} else if (IsFRMR(datap)) {
c = FRMR_error(l2, skb);
if (!c)
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
} else
c = 'L';
if (c) {
printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
}
return ret;
}
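/*
 * Dispatch logic above, in short: after the address octets are checked
 * and skipped, the low bits of the first control octet select the frame
 * class in the usual HDLC encoding: bit 0 clear means I-frame, low two
 * bits 01 a supervisory frame, and 11 an unnumbered frame, which the
 * IsUI/IsSABME/IsUA/IsDISC/IsDM/IsFRMR helpers narrow further.  Each
 * frame that passes its *_error() validation is fed to the layer-2
 * state machine as the matching EV_L2_* event; anything unrecognized is
 * reported as D-channel frame error 'L'.
 */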
static int
l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
{
struct layer2 *l2 = container_of(ch, struct layer2, ch);
struct mISDNhead *hh = mISDN_HEAD_P(skb);
int ret = -EINVAL;
if (*debug & DEBUG_L2_RECV)
printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n",
__func__, hh->prim, hh->id, l2->sapi, l2->tei);
switch (hh->prim) {
case PH_DATA_IND:
ret = ph_data_indication(l2, hh, skb);
break;
case PH_DATA_CNF:
ret = ph_data_confirm(l2, hh, skb);
break;
case PH_ACTIVATE_IND:
test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
ret = mISDN_FsmEvent(&l2->l2m,
EV_L2_DL_ESTABLISH_REQ, skb);
break;
case PH_DEACTIVATE_IND:
test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
break;
case MPH_INFORMATION_IND:
if (!l2->up)
break;
ret = l2->up->send(l2->up, skb);
break;
case DL_DATA_REQ:
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
break;
case DL_UNITDATA_REQ:
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
break;
case DL_ESTABLISH_REQ:
if (test_bit(FLG_LAPB, &l2->flag))
test_and_set_bit(FLG_ORIG, &l2->flag);
if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
if (test_bit(FLG_LAPD, &l2->flag) ||
test_bit(FLG_ORIG, &l2->flag))
ret = mISDN_FsmEvent(&l2->l2m,
EV_L2_DL_ESTABLISH_REQ, skb);
} else {
if (test_bit(FLG_LAPD, &l2->flag) ||
test_bit(FLG_ORIG, &l2->flag)) {
test_and_set_bit(FLG_ESTAB_PEND,
&l2->flag);
}
ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
skb);
}
break;
case DL_RELEASE_REQ:
if (test_bit(FLG_LAPB, &l2->flag))
l2down_create(l2, PH_DEACTIVATE_REQ,
l2_newid(l2), 0, NULL);
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
skb);
break;
default:
if (*debug & DEBUG_L2)
l2m_debug(&l2->l2m, "l2 unknown pr %04x",
hh->prim);
}
if (ret) {
dev_kfree_skb(skb);
ret = 0;
}
return ret;
}
int
tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
{
int ret = -EINVAL;
if (*debug & DEBUG_L2_TEI)
printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
switch (cmd) {
case (MDL_ASSIGN_REQ):
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
break;
case (MDL_REMOVE_REQ):
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
break;
case (MDL_ERROR_IND):
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
break;
case (MDL_ERROR_RSP):
/* ETS 300-125 5.3.2.1 Test: TC13010 */
printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n");
ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
break;
}
return ret;
}
static void
release_l2(struct layer2 *l2)
{
mISDN_FsmDelTimer(&l2->t200, 21);
mISDN_FsmDelTimer(&l2->t203, 16);
skb_queue_purge(&l2->i_queue);
skb_queue_purge(&l2->ui_queue);
skb_queue_purge(&l2->down_queue);
ReleaseWin(l2);
if (test_bit(FLG_LAPD, &l2->flag)) {
TEIrelease(l2);
if (l2->ch.st)
l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
CLOSE_CHANNEL, NULL);
}
kfree(l2);
}
static int
l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
struct layer2 *l2 = container_of(ch, struct layer2, ch);
u_int info;
if (*debug & DEBUG_L2_CTRL)
printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
switch (cmd) {
case OPEN_CHANNEL:
if (test_bit(FLG_LAPD, &l2->flag)) {
set_channel_address(&l2->ch, l2->sapi, l2->tei);
info = DL_INFO_L2_CONNECT;
l2up_create(l2, DL_INFORMATION_IND,
sizeof(info), &info);
}
break;
case CLOSE_CHANNEL:
if (l2->ch.peer)
l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
release_l2(l2);
break;
}
return 0;
}
struct layer2 *
create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
int sapi)
{
struct layer2 *l2;
struct channel_req rq;
l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
if (!l2) {
printk(KERN_ERR "kzalloc layer2 failed\n");
return NULL;
}
l2->next_id = 1;
l2->down_id = MISDN_ID_NONE;
l2->up = ch;
l2->ch.st = ch->st;
l2->ch.send = l2_send;
l2->ch.ctrl = l2_ctrl;
switch (protocol) {
case ISDN_P_LAPD_NT:
test_and_set_bit(FLG_LAPD, &l2->flag);
test_and_set_bit(FLG_LAPD_NET, &l2->flag);
test_and_set_bit(FLG_MOD128, &l2->flag);
l2->sapi = sapi;
l2->maxlen = MAX_DFRAME_LEN;
if (test_bit(OPTION_L2_PMX, &options))
l2->window = 7;
else
l2->window = 1;
if (test_bit(OPTION_L2_PTP, &options))
test_and_set_bit(FLG_PTP, &l2->flag);
if (test_bit(OPTION_L2_FIXEDTEI, &options))
test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
l2->tei = tei;
l2->T200 = 1000;
l2->N200 = 3;
l2->T203 = 10000;
if (test_bit(OPTION_L2_PMX, &options))
rq.protocol = ISDN_P_NT_E1;
else
rq.protocol = ISDN_P_NT_S0;
rq.adr.channel = 0;
l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
break;
case ISDN_P_LAPD_TE:
test_and_set_bit(FLG_LAPD, &l2->flag);
test_and_set_bit(FLG_MOD128, &l2->flag);
test_and_set_bit(FLG_ORIG, &l2->flag);
l2->sapi = sapi;
l2->maxlen = MAX_DFRAME_LEN;
if (test_bit(OPTION_L2_PMX, &options))
l2->window = 7;
else
l2->window = 1;
if (test_bit(OPTION_L2_PTP, &options))
test_and_set_bit(FLG_PTP, &l2->flag);
if (test_bit(OPTION_L2_FIXEDTEI, &options))
test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
l2->tei = tei;
l2->T200 = 1000;
l2->N200 = 3;
l2->T203 = 10000;
if (test_bit(OPTION_L2_PMX, &options))
rq.protocol = ISDN_P_TE_E1;
else
rq.protocol = ISDN_P_TE_S0;
rq.adr.channel = 0;
l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
break;
case ISDN_P_B_X75SLP:
test_and_set_bit(FLG_LAPB, &l2->flag);
l2->window = 7;
l2->maxlen = MAX_DATA_SIZE;
l2->T200 = 1000;
l2->N200 = 4;
l2->T203 = 5000;
l2->addr.A = 3;
l2->addr.B = 1;
break;
default:
printk(KERN_ERR "layer2 create failed prt %x\n",
protocol);
kfree(l2);
return NULL;
}
skb_queue_head_init(&l2->i_queue);
skb_queue_head_init(&l2->ui_queue);
skb_queue_head_init(&l2->down_queue);
skb_queue_head_init(&l2->tmp_queue);
InitWin(l2);
l2->l2m.fsm = &l2fsm;
if (test_bit(FLG_LAPB, &l2->flag) ||
test_bit(FLG_PTP, &l2->flag) ||
test_bit(FLG_LAPD_NET, &l2->flag))
l2->l2m.state = ST_L2_4;
else
l2->l2m.state = ST_L2_1;
l2->l2m.debug = *debug;
l2->l2m.userdata = l2;
l2->l2m.userint = 0;
l2->l2m.printdebug = l2m_debug;
mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
return l2;
}
static int
x75create(struct channel_req *crq)
{
struct layer2 *l2;
if (crq->protocol != ISDN_P_B_X75SLP)
return -EPROTONOSUPPORT;
l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0);
if (!l2)
return -ENOMEM;
crq->ch = &l2->ch;
crq->protocol = ISDN_P_B_HDLC;
return 0;
}
static struct Bprotocol X75SLP = {
.Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
.name = "X75SLP",
.create = x75create
};
int
Isdnl2_Init(u_int *deb)
{
debug = deb;
mISDN_register_Bprotocol(&X75SLP);
l2fsm.state_count = L2_STATE_COUNT;
l2fsm.event_count = L2_EVENT_COUNT;
l2fsm.strEvent = strL2Event;
l2fsm.strState = strL2State;
mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
TEIInit(deb);
return 0;
}
void
Isdnl2_cleanup(void)
{
mISDN_unregister_Bprotocol(&X75SLP);
TEIFree();
mISDN_FsmFree(&l2fsm);
}
| gpl-2.0 |
qqzwc/JBX_Kernel | drivers/isdn/hisax/elsa_cs.c | 3028 | 6386 | /*======================================================================
An elsa_cs PCMCIA client driver
This driver is for the Elsa PCM ISDN Cards, i.e. the MicroLink
The contents of this file are subject to the Mozilla Public
License Version 1.1 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of
the License at http://www.mozilla.org/MPL/
Software distributed under the License is distributed on an "AS
IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
implied. See the License for the specific language governing
rights and limitations under the License.
The initial developer of the original code is David A. Hinds
<dahinds@users.sourceforge.net>. Portions created by David A. Hinds
are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
Modifications from dummy_cs.c are Copyright (C) 1999-2001 Klaus
Lichtenwalder <Lichtenwalder@ACM.org>. All Rights Reserved.
Alternatively, the contents of this file may be used under the
terms of the GNU General Public License version 2 (the "GPL"), in
which case the provisions of the GPL are applicable instead of the
above. If you wish to allow the use of your version of this file
only under the terms of the GPL and not to allow others to use
your version of this file under the MPL, indicate your decision
by deleting the provisions above and replace them with the notice
and other provisions required by the GPL. If you do not delete
the provisions above, a recipient may use your version of this
file under either the MPL or the GPL.
======================================================================*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ioport.h>
#include <asm/io.h>
#include <asm/system.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
#include "hisax_cfg.h"
MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Elsa PCM cards");
MODULE_AUTHOR("Klaus Lichtenwalder");
MODULE_LICENSE("Dual MPL/GPL");
/*====================================================================*/
/* Parameters that can be set with 'insmod' */
static int protocol = 2; /* EURO-ISDN Default */
module_param(protocol, int, 0);
static int elsa_cs_config(struct pcmcia_device *link) __devinit ;
static void elsa_cs_release(struct pcmcia_device *link);
static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit;
typedef struct local_info_t {
struct pcmcia_device *p_dev;
int busy;
int cardnr;
} local_info_t;
static int __devinit elsa_cs_probe(struct pcmcia_device *link)
{
local_info_t *local;
dev_dbg(&link->dev, "elsa_cs_attach()\n");
/* Allocate space for private device-specific data */
local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
if (!local) return -ENOMEM;
local->p_dev = link;
link->priv = local;
local->cardnr = -1;
return elsa_cs_config(link);
} /* elsa_cs_attach */
static void __devexit elsa_cs_detach(struct pcmcia_device *link)
{
local_info_t *info = link->priv;
dev_dbg(&link->dev, "elsa_cs_detach(0x%p)\n", link);
info->busy = 1;
elsa_cs_release(link);
kfree(info);
} /* elsa_cs_detach */
static int elsa_cs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
{
int j;
p_dev->io_lines = 3;
p_dev->resource[0]->end = 8;
p_dev->resource[0]->flags &= IO_DATA_PATH_WIDTH;
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
if ((p_dev->resource[0]->end) && p_dev->resource[0]->start) {
printk(KERN_INFO "(elsa_cs: looks like the 96 model)\n");
if (!pcmcia_request_io(p_dev))
return 0;
} else {
printk(KERN_INFO "(elsa_cs: looks like the 97 model)\n");
for (j = 0x2f0; j > 0x100; j -= 0x10) {
p_dev->resource[0]->start = j;
if (!pcmcia_request_io(p_dev))
return 0;
}
}
return -ENODEV;
}
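/*
 * In other words: if the CIS already supplies a usable I/O base (the
 * '96 model case) the resource is requested as-is; otherwise the probe
 * walks candidate bases downwards from 0x2f0 to just above 0x100 in
 * steps of 0x10 until pcmcia_request_io() succeeds, which is how the
 * '97 model cards are located.
 */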
static int __devinit elsa_cs_config(struct pcmcia_device *link)
{
int i;
IsdnCard_t icard;
dev_dbg(&link->dev, "elsa_config(0x%p)\n", link);
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
i = pcmcia_loop_config(link, elsa_cs_configcheck, NULL);
if (i != 0)
goto failed;
if (!link->irq)
goto failed;
i = pcmcia_enable_device(link);
if (i != 0)
goto failed;
icard.para[0] = link->irq;
icard.para[1] = link->resource[0]->start;
icard.protocol = protocol;
icard.typ = ISDN_CTYPE_ELSA_PCMCIA;
i = hisax_init_pcmcia(link, &(((local_info_t*)link->priv)->busy), &icard);
if (i < 0) {
printk(KERN_ERR "elsa_cs: failed to initialize Elsa "
"PCMCIA %d with %pR\n", i, link->resource[0]);
elsa_cs_release(link);
} else
((local_info_t*)link->priv)->cardnr = i;
return 0;
failed:
elsa_cs_release(link);
return -ENODEV;
} /* elsa_cs_config */
static void elsa_cs_release(struct pcmcia_device *link)
{
local_info_t *local = link->priv;
dev_dbg(&link->dev, "elsa_cs_release(0x%p)\n", link);
if (local) {
if (local->cardnr >= 0) {
/* no unregister function with hisax */
HiSax_closecard(local->cardnr);
}
}
pcmcia_disable_device(link);
} /* elsa_cs_release */
static int elsa_suspend(struct pcmcia_device *link)
{
local_info_t *dev = link->priv;
dev->busy = 1;
return 0;
}
static int elsa_resume(struct pcmcia_device *link)
{
local_info_t *dev = link->priv;
dev->busy = 0;
return 0;
}
static const struct pcmcia_device_id elsa_ids[] = {
PCMCIA_DEVICE_PROD_ID12("ELSA AG (Aachen, Germany)", "MicroLink ISDN/MC ", 0x983de2c4, 0x333ba257),
PCMCIA_DEVICE_PROD_ID12("ELSA GmbH, Aachen", "MicroLink ISDN/MC ", 0x639e5718, 0x333ba257),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, elsa_ids);
static struct pcmcia_driver elsa_cs_driver = {
.owner = THIS_MODULE,
.name = "elsa_cs",
.probe = elsa_cs_probe,
.remove = __devexit_p(elsa_cs_detach),
.id_table = elsa_ids,
.suspend = elsa_suspend,
.resume = elsa_resume,
};
static int __init init_elsa_cs(void)
{
return pcmcia_register_driver(&elsa_cs_driver);
}
static void __exit exit_elsa_cs(void)
{
pcmcia_unregister_driver(&elsa_cs_driver);
}
module_init(init_elsa_cs);
module_exit(exit_elsa_cs);
| gpl-2.0 |
BlurOS/android_kernel_pantech_msm8x74 | kernel/debug/kdb/kdb_main.c | 4308 | 71380 | /*
* Kernel Debugger Architecture Independent Main Code
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
* Xscale (R) modifications copyright (C) 2003 Intel Corporation.
* Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
*/
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/smp.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/notifier.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/nmi.h>
#include <linux/time.h>
#include <linux/ptrace.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/kdebug.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include "kdb_private.h"
#define GREP_LEN 256
char kdb_grep_string[GREP_LEN];
int kdb_grepping_flag;
EXPORT_SYMBOL(kdb_grepping_flag);
int kdb_grep_leading;
int kdb_grep_trailing;
/*
* Kernel debugger state flags
*/
int kdb_flags;
atomic_t kdb_event;
/*
* kdb_lock protects updates to kdb_initial_cpu. Used to
* single thread processors through the kernel debugger.
*/
int kdb_initial_cpu = -1; /* cpu number that owns kdb */
int kdb_nextline = 1;
int kdb_state; /* General KDB state */
struct task_struct *kdb_current_task;
EXPORT_SYMBOL(kdb_current_task);
struct pt_regs *kdb_current_regs;
const char *kdb_diemsg;
static int kdb_go_count;
#ifdef CONFIG_KDB_CONTINUE_CATASTROPHIC
static unsigned int kdb_continue_catastrophic =
CONFIG_KDB_CONTINUE_CATASTROPHIC;
#else
static unsigned int kdb_continue_catastrophic;
#endif
/* kdb_commands describes the available commands. */
static kdbtab_t *kdb_commands;
#define KDB_BASE_CMD_MAX 50
static int kdb_max_commands = KDB_BASE_CMD_MAX;
static kdbtab_t kdb_base_commands[KDB_BASE_CMD_MAX];
#define for_each_kdbcmd(cmd, num) \
for ((cmd) = kdb_base_commands, (num) = 0; \
num < kdb_max_commands; \
num++, num == KDB_BASE_CMD_MAX ? cmd = kdb_commands : cmd++)
typedef struct _kdbmsg {
int km_diag; /* kdb diagnostic */
char *km_msg; /* Corresponding message text */
} kdbmsg_t;
#define KDBMSG(msgnum, text) \
{ KDB_##msgnum, text }
static kdbmsg_t kdbmsgs[] = {
KDBMSG(NOTFOUND, "Command Not Found"),
KDBMSG(ARGCOUNT, "Improper argument count, see usage."),
KDBMSG(BADWIDTH, "Illegal value for BYTESPERWORD use 1, 2, 4 or 8, "
"8 is only allowed on 64 bit systems"),
KDBMSG(BADRADIX, "Illegal value for RADIX use 8, 10 or 16"),
KDBMSG(NOTENV, "Cannot find environment variable"),
KDBMSG(NOENVVALUE, "Environment variable should have value"),
KDBMSG(NOTIMP, "Command not implemented"),
KDBMSG(ENVFULL, "Environment full"),
KDBMSG(ENVBUFFULL, "Environment buffer full"),
KDBMSG(TOOMANYBPT, "Too many breakpoints defined"),
#ifdef CONFIG_CPU_XSCALE
KDBMSG(TOOMANYDBREGS, "More breakpoints than ibcr registers defined"),
#else
KDBMSG(TOOMANYDBREGS, "More breakpoints than db registers defined"),
#endif
KDBMSG(DUPBPT, "Duplicate breakpoint address"),
KDBMSG(BPTNOTFOUND, "Breakpoint not found"),
KDBMSG(BADMODE, "Invalid IDMODE"),
KDBMSG(BADINT, "Illegal numeric value"),
KDBMSG(INVADDRFMT, "Invalid symbolic address format"),
KDBMSG(BADREG, "Invalid register name"),
KDBMSG(BADCPUNUM, "Invalid cpu number"),
KDBMSG(BADLENGTH, "Invalid length field"),
KDBMSG(NOBP, "No Breakpoint exists"),
KDBMSG(BADADDR, "Invalid address"),
};
#undef KDBMSG
static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t);
/*
* Initial environment. This is all kept static and local to
* this file. We don't want to rely on the memory allocation
* mechanisms in the kernel, so we use a very limited allocate-only
* heap for new and altered environment variables. The entire
* environment is limited to a fixed number of entries (add more
* to __env[] if required) and a fixed amount of heap (add more to
* KDB_ENVBUFSIZE if required).
*/
static char *__env[] = {
#if defined(CONFIG_SMP)
"PROMPT=[%d]kdb> ",
"MOREPROMPT=[%d]more> ",
#else
"PROMPT=kdb> ",
"MOREPROMPT=more> ",
#endif
"RADIX=16",
"MDCOUNT=8", /* lines of md output */
KDB_PLATFORM_ENV,
"DTABCOUNT=30",
"NOSECT=1",
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
(char *)0,
};
static const int __nenv = (sizeof(__env) / sizeof(char *));
struct task_struct *kdb_curr_task(int cpu)
{
struct task_struct *p = curr_task(cpu);
#ifdef _TIF_MCA_INIT
if ((task_thread_info(p)->flags & _TIF_MCA_INIT) && KDB_TSK(cpu))
p = krp->p;
#endif
return p;
}
/*
* kdbgetenv - This function will return the character string value of
* an environment variable.
* Parameters:
* match A character string representing an environment variable.
* Returns:
* NULL No environment variable matches 'match'
* char* Pointer to string value of environment variable.
*/
char *kdbgetenv(const char *match)
{
char **ep = __env;
int matchlen = strlen(match);
int i;
for (i = 0; i < __nenv; i++) {
char *e = *ep++;
if (!e)
continue;
if ((strncmp(match, e, matchlen) == 0)
&& ((e[matchlen] == '\0')
|| (e[matchlen] == '='))) {
char *cp = strchr(e, '=');
return cp ? ++cp : "";
}
}
return NULL;
}
/*
* kdballocenv - This function is used to allocate bytes for
* environment entries.
* Parameters:
* bytes The number of bytes to allocate for an environment entry.
* Returns:
* A pointer to the allocated space within the static environment
* buffer, or NULL if the buffer does not have enough room left.
* Remarks:
* We use a static environment buffer (envbuffer) to hold the values
* of dynamically generated environment variables (see kdb_set). Buffer
* space once allocated is never free'd, so over time, the amount of space
* (currently 512 bytes) will be exhausted if env variables are changed
* frequently.
*/
static char *kdballocenv(size_t bytes)
{
#define KDB_ENVBUFSIZE 512
static char envbuffer[KDB_ENVBUFSIZE];
static int envbufsize;
char *ep = NULL;
if ((KDB_ENVBUFSIZE - envbufsize) >= bytes) {
ep = &envbuffer[envbufsize];
envbufsize += bytes;
}
return ep;
}
/*
* kdbgetulenv - This function will return the value of an unsigned
* long-valued environment variable.
* Parameters:
* match A character string representing a numeric value
* Outputs:
* *value the unsigned long representation of the env variable 'match'
* Returns:
* Zero on success, a kdb diagnostic on failure.
*/
static int kdbgetulenv(const char *match, unsigned long *value)
{
char *ep;
ep = kdbgetenv(match);
if (!ep)
return KDB_NOTENV;
if (strlen(ep) == 0)
return KDB_NOENVVALUE;
*value = simple_strtoul(ep, NULL, 0);
return 0;
}
/*
* kdbgetintenv - This function will return the value of an
* integer-valued environment variable.
* Parameters:
* match A character string representing an integer-valued env variable
* Outputs:
* *value the integer representation of the environment variable 'match'
* Returns:
* Zero on success, a kdb diagnostic on failure.
*/
int kdbgetintenv(const char *match, int *value)
{
unsigned long val;
int diag;
diag = kdbgetulenv(match, &val);
if (!diag)
*value = (int) val;
return diag;
}
/*
* kdbgetularg - This function will convert a numeric string into an
* unsigned long value.
* Parameters:
* arg A character string representing a numeric value
* Outputs:
* *value the unsigned long representation of arg.
* Returns:
* Zero on success, a kdb diagnostic on failure.
*/
int kdbgetularg(const char *arg, unsigned long *value)
{
char *endp;
unsigned long val;
val = simple_strtoul(arg, &endp, 0);
if (endp == arg) {
/*
* Also try base 16, for us folks too lazy to type the
* leading 0x...
*/
val = simple_strtoul(arg, &endp, 16);
if (endp == arg)
return KDB_BADINT;
}
*value = val;
return 0;
}
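/*
 * Examples of the fallback above (illustrative input strings): "0x1f"
 * and "31" both parse directly; "f3" fails the base-0 parse (no leading
 * digit or 0x prefix), is retried as base 16 and yields 0xf3; a string
 * such as "zz" fails both attempts and returns KDB_BADINT.
 */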
int kdbgetu64arg(const char *arg, u64 *value)
{
char *endp;
u64 val;
val = simple_strtoull(arg, &endp, 0);
if (endp == arg) {
val = simple_strtoull(arg, &endp, 16);
if (endp == arg)
return KDB_BADINT;
}
*value = val;
return 0;
}
/*
* kdb_set - This function implements the 'set' command. Alter an
* existing environment variable or create a new one.
*/
int kdb_set(int argc, const char **argv)
{
int i;
char *ep;
size_t varlen, vallen;
/*
* we can be invoked two ways:
* set var=value argv[1]="var", argv[2]="value"
* set var = value argv[1]="var", argv[2]="=", argv[3]="value"
* - if the latter, shift 'em down.
*/
if (argc == 3) {
argv[2] = argv[3];
argc--;
}
if (argc != 2)
return KDB_ARGCOUNT;
/*
* Check for internal variables
*/
if (strcmp(argv[1], "KDBDEBUG") == 0) {
unsigned int debugflags;
char *cp;
debugflags = simple_strtoul(argv[2], &cp, 0);
if (cp == argv[2] || debugflags & ~KDB_DEBUG_FLAG_MASK) {
kdb_printf("kdb: illegal debug flags '%s'\n",
argv[2]);
return 0;
}
kdb_flags = (kdb_flags &
~(KDB_DEBUG_FLAG_MASK << KDB_DEBUG_FLAG_SHIFT))
| (debugflags << KDB_DEBUG_FLAG_SHIFT);
return 0;
}
/*
* Tokenizer squashed the '=' sign. argv[1] is variable
* name, argv[2] = value.
*/
varlen = strlen(argv[1]);
vallen = strlen(argv[2]);
ep = kdballocenv(varlen + vallen + 2);
if (ep == (char *)0)
return KDB_ENVBUFFULL;
sprintf(ep, "%s=%s", argv[1], argv[2]);
ep[varlen+vallen+1] = '\0';
for (i = 0; i < __nenv; i++) {
if (__env[i]
&& ((strncmp(__env[i], argv[1], varlen) == 0)
&& ((__env[i][varlen] == '\0')
|| (__env[i][varlen] == '=')))) {
__env[i] = ep;
return 0;
}
}
/*
* Wasn't existing variable. Fit into slot.
*/
for (i = 0; i < __nenv-1; i++) {
if (__env[i] == (char *)0) {
__env[i] = ep;
return 0;
}
}
return KDB_ENVFULL;
}
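/*
 * Usage examples for the two accepted spellings handled above (the
 * values are illustrative): "set MDCOUNT=16" reaches kdb_set() as a
 * name/value pair, while "set MDCOUNT = 16" arrives with an extra
 * argument and is shifted down first; both end up storing "MDCOUNT=16"
 * in the static environment via kdballocenv().  "set KDBDEBUG=1" is
 * caught by the internal-variable check and only updates kdb_flags,
 * without touching the environment buffer.
 */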
static int kdb_check_regs(void)
{
if (!kdb_current_regs) {
kdb_printf("No current kdb registers."
" You may need to select another task\n");
return KDB_BADREG;
}
return 0;
}
/*
* kdbgetaddrarg - This function is responsible for parsing an
* address-expression and returning the value of the expression,
* symbol name, and offset to the caller.
*
* The argument may consist of a numeric value (decimal or
* hexadecimal), a symbol name, a register name (preceded by the
* percent sign), an environment variable with a numeric value
* (preceded by a dollar sign) or a simple arithmetic expression
* consisting of a symbol name, +/-, and a numeric constant value
* (offset).
* Parameters:
* argc - count of arguments in argv
* argv - argument vector
* *nextarg - index to next unparsed argument in argv[]
* regs - Register state at time of KDB entry
* Outputs:
* *value - receives the value of the address-expression
* *offset - receives the offset specified, if any
* *name - receives the symbol name, if any
* *nextarg - index to next unparsed argument in argv[]
* Returns:
* zero is returned on success, a kdb diagnostic code is
* returned on error.
*/
int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
unsigned long *value, long *offset,
char **name)
{
unsigned long addr;
unsigned long off = 0;
int positive;
int diag;
int found = 0;
char *symname;
char symbol = '\0';
char *cp;
kdb_symtab_t symtab;
/*
* Process arguments which follow the following syntax:
*
* symbol | numeric-address [+/- numeric-offset]
* %register
* $environment-variable
*/
if (*nextarg > argc)
return KDB_ARGCOUNT;
symname = (char *)argv[*nextarg];
/*
* If there is no whitespace between the symbol
* or address and the '+' or '-' symbols, we
* remember the character and replace it with a
* null so the symbol/value can be properly parsed
*/
cp = strpbrk(symname, "+-");
if (cp != NULL) {
symbol = *cp;
*cp++ = '\0';
}
if (symname[0] == '$') {
diag = kdbgetulenv(&symname[1], &addr);
if (diag)
return diag;
} else if (symname[0] == '%') {
diag = kdb_check_regs();
if (diag)
return diag;
/* Implement register values with % at a later time as it is
* arch optional.
*/
return KDB_NOTIMP;
} else {
found = kdbgetsymval(symname, &symtab);
if (found) {
addr = symtab.sym_start;
} else {
diag = kdbgetularg(argv[*nextarg], &addr);
if (diag)
return diag;
}
}
if (!found)
found = kdbnearsym(addr, &symtab);
(*nextarg)++;
if (name)
*name = symname;
if (value)
*value = addr;
if (offset && name && *name)
*offset = addr - symtab.sym_start;
if ((*nextarg > argc)
&& (symbol == '\0'))
return 0;
/*
* check for +/- and offset
*/
if (symbol == '\0') {
if ((argv[*nextarg][0] != '+')
&& (argv[*nextarg][0] != '-')) {
/*
* Not our argument. Return.
*/
return 0;
} else {
positive = (argv[*nextarg][0] == '+');
(*nextarg)++;
}
} else
positive = (symbol == '+');
/*
* Now there must be an offset!
*/
if ((*nextarg > argc)
&& (symbol == '\0')) {
return KDB_INVADDRFMT;
}
if (!symbol) {
cp = (char *)argv[*nextarg];
(*nextarg)++;
}
diag = kdbgetularg(cp, &off);
if (diag)
return diag;
if (!positive)
off = -off;
if (offset)
*offset += off;
if (value)
*value += off;
return 0;
}
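/*
 * Accepted address expressions, following the syntax described above
 * (symbol names are illustrative): a plain symbol such as "schedule",
 * a symbol plus offset such as "schedule+0x10", a numeric address with
 * offset such as "0xc0123456-4", or a numeric environment variable
 * such as "$MDCOUNT".  Register references like "%eip" are recognized
 * but currently return KDB_NOTIMP, since register access is left to
 * the architecture code.
 */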
static void kdb_cmderror(int diag)
{
int i;
if (diag >= 0) {
kdb_printf("no error detected (diagnostic is %d)\n", diag);
return;
}
for (i = 0; i < __nkdb_err; i++) {
if (kdbmsgs[i].km_diag == diag) {
kdb_printf("diag: %d: %s\n", diag, kdbmsgs[i].km_msg);
return;
}
}
kdb_printf("Unknown diag %d\n", -diag);
}
/*
* kdb_defcmd, kdb_defcmd2 - This function implements the 'defcmd'
* command which defines one command as a set of other commands,
* terminated by endefcmd. kdb_defcmd processes the initial
* 'defcmd' command, kdb_defcmd2 is invoked from kdb_parse for
* the following commands until 'endefcmd'.
* Inputs:
* argc argument count
* argv argument vector
* Returns:
* zero for success, a kdb diagnostic if error
*/
struct defcmd_set {
int count;
int usable;
char *name;
char *usage;
char *help;
char **command;
};
static struct defcmd_set *defcmd_set;
static int defcmd_set_count;
static int defcmd_in_progress;
/* Forward references */
static int kdb_exec_defcmd(int argc, const char **argv);
static int kdb_defcmd2(const char *cmdstr, const char *argv0)
{
struct defcmd_set *s = defcmd_set + defcmd_set_count - 1;
char **save_command = s->command;
if (strcmp(argv0, "endefcmd") == 0) {
defcmd_in_progress = 0;
if (!s->count)
s->usable = 0;
if (s->usable)
kdb_register(s->name, kdb_exec_defcmd,
s->usage, s->help, 0);
return 0;
}
if (!s->usable)
return KDB_NOTIMP;
s->command = kzalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
if (!s->command) {
kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
cmdstr);
s->usable = 0;
return KDB_NOTIMP;
}
memcpy(s->command, save_command, s->count * sizeof(*(s->command)));
s->command[s->count++] = kdb_strdup(cmdstr, GFP_KDB);
kfree(save_command);
return 0;
}
static int kdb_defcmd(int argc, const char **argv)
{
struct defcmd_set *save_defcmd_set = defcmd_set, *s;
if (defcmd_in_progress) {
kdb_printf("kdb: nested defcmd detected, assuming missing "
"endefcmd\n");
kdb_defcmd2("endefcmd", "endefcmd");
}
if (argc == 0) {
int i;
for (s = defcmd_set; s < defcmd_set + defcmd_set_count; ++s) {
kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->name,
s->usage, s->help);
for (i = 0; i < s->count; ++i)
kdb_printf("%s", s->command[i]);
kdb_printf("endefcmd\n");
}
return 0;
}
if (argc != 3)
return KDB_ARGCOUNT;
defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
GFP_KDB);
if (!defcmd_set) {
kdb_printf("Could not allocate new defcmd_set entry for %s\n",
argv[1]);
defcmd_set = save_defcmd_set;
return KDB_NOTIMP;
}
memcpy(defcmd_set, save_defcmd_set,
defcmd_set_count * sizeof(*defcmd_set));
kfree(save_defcmd_set);
s = defcmd_set + defcmd_set_count;
memset(s, 0, sizeof(*s));
s->usable = 1;
s->name = kdb_strdup(argv[1], GFP_KDB);
s->usage = kdb_strdup(argv[2], GFP_KDB);
s->help = kdb_strdup(argv[3], GFP_KDB);
if (s->usage[0] == '"') {
strcpy(s->usage, s->usage+1);
s->usage[strlen(s->usage)-1] = '\0';
}
if (s->help[0] == '"') {
strcpy(s->help, s->help+1);
s->help[strlen(s->help)-1] = '\0';
}
++defcmd_set_count;
defcmd_in_progress = 1;
return 0;
}
/*
* kdb_exec_defcmd - Execute the set of commands associated with this
* defcmd name.
* Inputs:
* argc argument count
* argv argument vector
* Returns:
* zero for success, a kdb diagnostic if error
*/
static int kdb_exec_defcmd(int argc, const char **argv)
{
int i, ret;
struct defcmd_set *s;
if (argc != 0)
return KDB_ARGCOUNT;
for (s = defcmd_set, i = 0; i < defcmd_set_count; ++i, ++s) {
if (strcmp(s->name, argv[0]) == 0)
break;
}
if (i == defcmd_set_count) {
kdb_printf("kdb_exec_defcmd: could not find commands for %s\n",
argv[0]);
return KDB_NOTIMP;
}
for (i = 0; i < s->count; ++i) {
/* Recursive use of kdb_parse, do not use argv after
* this point */
argv = NULL;
kdb_printf("[%s]kdb> %s\n", s->name, s->command[i]);
ret = kdb_parse(s->command[i]);
if (ret)
return ret;
}
return 0;
}
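/*
 * Example of the resulting flow (the macro name is illustrative; "ps"
 * and "rd" are ordinary kdb commands): typing
 *
 *   defcmd dumpinfo "" "summarize tasks and registers"
 *     ps
 *     rd
 *   endefcmd
 *
 * at the kdb prompt stores the two lines via kdb_defcmd/kdb_defcmd2 and
 * registers "dumpinfo" with kdb_exec_defcmd as its handler, so a later
 * "dumpinfo" replays the saved commands through kdb_parse().
 */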
/* Command history */
#define KDB_CMD_HISTORY_COUNT 32
#define CMD_BUFLEN 200 /* kdb_printf: max printline
* size == 256 */
static unsigned int cmd_head, cmd_tail;
static unsigned int cmdptr;
static char cmd_hist[KDB_CMD_HISTORY_COUNT][CMD_BUFLEN];
static char cmd_cur[CMD_BUFLEN];
/*
* The "str" argument may point to something like | grep xyz
*/
static void parse_grep(const char *str)
{
int len;
char *cp = (char *)str, *cp2;
/* sanity check: we should have been called with the '|' first */
if (*cp != '|')
return;
cp++;
while (isspace(*cp))
cp++;
if (strncmp(cp, "grep ", 5)) {
kdb_printf("invalid 'pipe', see grephelp\n");
return;
}
cp += 5;
while (isspace(*cp))
cp++;
cp2 = strchr(cp, '\n');
if (cp2)
*cp2 = '\0'; /* remove the trailing newline */
len = strlen(cp);
if (len == 0) {
kdb_printf("invalid 'pipe', see grephelp\n");
return;
}
/* now cp points to a nonzero length search string */
if (*cp == '"') {
/* allow it be "x y z" by removing the "'s - there must
be two of them */
cp++;
cp2 = strchr(cp, '"');
if (!cp2) {
kdb_printf("invalid quoted string, see grephelp\n");
return;
}
*cp2 = '\0'; /* end the string where the 2nd " was */
}
kdb_grep_leading = 0;
if (*cp == '^') {
kdb_grep_leading = 1;
cp++;
}
len = strlen(cp);
kdb_grep_trailing = 0;
if (*(cp+len-1) == '$') {
kdb_grep_trailing = 1;
*(cp+len-1) = '\0';
}
len = strlen(cp);
if (!len)
return;
if (len >= GREP_LEN) {
kdb_printf("search string too long\n");
return;
}
strcpy(kdb_grep_string, cp);
kdb_grepping_flag++;
return;
}
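/*
 * Examples of the pipe form handled above (patterns are illustrative):
 * "ps | grep bash" keeps only output lines containing "bash";
 * "ps | grep "idle task"" uses double quotes so the pattern may contain
 * spaces; a leading '^' or trailing '$' anchors the match to the start
 * or end of a line via kdb_grep_leading and kdb_grep_trailing.
 * Anything else after the '|' is rejected with "invalid 'pipe', see
 * grephelp".
 */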
/*
* kdb_parse - Parse the command line, search the command table for a
* matching command and invoke the command function. This
* function may be called recursively, if it is, the second call
* will overwrite argv and cbuf. It is the caller's
* responsibility to save their argv if they recursively call
* kdb_parse().
* Parameters:
* cmdstr The input command line to be parsed.
* regs The registers at the time kdb was entered.
* Returns:
* Zero for success, a kdb diagnostic if failure.
* Remarks:
* Limited to 20 tokens.
*
* Real rudimentary tokenization. Basically only whitespace
* is considered a token delimiter (but special consideration
* is taken of the '=' sign as used by the 'set' command).
*
* The algorithm used to tokenize the input string relies on
* there being at least one whitespace (or otherwise useless)
* character between tokens as the character immediately following
* the token is altered in-place to a null-byte to terminate the
* token string.
*/
#define MAXARGC 20
int kdb_parse(const char *cmdstr)
{
static char *argv[MAXARGC];
static int argc;
static char cbuf[CMD_BUFLEN+2];
char *cp;
char *cpp, quoted;
kdbtab_t *tp;
int i, escaped, ignore_errors = 0, check_grep;
/*
* First tokenize the command string.
*/
cp = (char *)cmdstr;
kdb_grepping_flag = check_grep = 0;
if (KDB_FLAG(CMD_INTERRUPT)) {
/* Previous command was interrupted, newline must not
* repeat the command */
KDB_FLAG_CLEAR(CMD_INTERRUPT);
KDB_STATE_SET(PAGER);
argc = 0; /* no repeat */
}
if (*cp != '\n' && *cp != '\0') {
argc = 0;
cpp = cbuf;
while (*cp) {
/* skip whitespace */
while (isspace(*cp))
cp++;
if ((*cp == '\0') || (*cp == '\n') ||
(*cp == '#' && !defcmd_in_progress))
break;
/* special case: check for | grep pattern */
if (*cp == '|') {
check_grep++;
break;
}
if (cpp >= cbuf + CMD_BUFLEN) {
kdb_printf("kdb_parse: command buffer "
"overflow, command ignored\n%s\n",
cmdstr);
return KDB_NOTFOUND;
}
if (argc >= MAXARGC - 1) {
kdb_printf("kdb_parse: too many arguments, "
"command ignored\n%s\n", cmdstr);
return KDB_NOTFOUND;
}
argv[argc++] = cpp;
escaped = 0;
quoted = '\0';
/* Copy to next unquoted and unescaped
* whitespace or '=' */
while (*cp && *cp != '\n' &&
(escaped || quoted || !isspace(*cp))) {
if (cpp >= cbuf + CMD_BUFLEN)
break;
if (escaped) {
escaped = 0;
*cpp++ = *cp++;
continue;
}
if (*cp == '\\') {
escaped = 1;
++cp;
continue;
}
if (*cp == quoted)
quoted = '\0';
else if (*cp == '\'' || *cp == '"')
quoted = *cp;
*cpp = *cp++;
if (*cpp == '=' && !quoted)
break;
++cpp;
}
*cpp++ = '\0'; /* Squash a ws or '=' character */
}
}
if (!argc)
return 0;
if (check_grep)
parse_grep(cp);
if (defcmd_in_progress) {
int result = kdb_defcmd2(cmdstr, argv[0]);
if (!defcmd_in_progress) {
argc = 0; /* avoid repeat on endefcmd */
*(argv[0]) = '\0';
}
return result;
}
if (argv[0][0] == '-' && argv[0][1] &&
(argv[0][1] < '0' || argv[0][1] > '9')) {
ignore_errors = 1;
++argv[0];
}
for_each_kdbcmd(tp, i) {
if (tp->cmd_name) {
/*
* If this command is allowed to be abbreviated,
* check to see if this is it.
*/
if (tp->cmd_minlen
&& (strlen(argv[0]) <= tp->cmd_minlen)) {
if (strncmp(argv[0],
tp->cmd_name,
tp->cmd_minlen) == 0) {
break;
}
}
if (strcmp(argv[0], tp->cmd_name) == 0)
break;
}
}
/*
* If we don't find a command by this name, see if the first
* few characters of this match any of the known commands.
* e.g., md1c20 should match md.
*/
if (i == kdb_max_commands) {
for_each_kdbcmd(tp, i) {
if (tp->cmd_name) {
if (strncmp(argv[0],
tp->cmd_name,
strlen(tp->cmd_name)) == 0) {
break;
}
}
}
}
if (i < kdb_max_commands) {
int result;
KDB_STATE_SET(CMD);
result = (*tp->cmd_func)(argc-1, (const char **)argv);
if (result && ignore_errors && result > KDB_CMD_GO)
result = 0;
KDB_STATE_CLEAR(CMD);
switch (tp->cmd_repeat) {
case KDB_REPEAT_NONE:
argc = 0;
if (argv[0])
*(argv[0]) = '\0';
break;
case KDB_REPEAT_NO_ARGS:
argc = 1;
if (argv[1])
*(argv[1]) = '\0';
break;
case KDB_REPEAT_WITH_ARGS:
break;
}
return result;
}
/*
* If the input with which we were presented does not
* map to an existing command, attempt to parse it as an
* address argument and display the result. Useful for
* obtaining the address of a variable, or the nearest symbol
* to an address contained in a register.
*/
{
unsigned long value;
char *name = NULL;
long offset;
int nextarg = 0;
if (kdbgetaddrarg(0, (const char **)argv, &nextarg,
&value, &offset, &name)) {
return KDB_NOTFOUND;
}
kdb_printf("%s = ", argv[0]);
kdb_symbol_print(value, NULL, KDB_SP_DEFAULT);
kdb_printf("\n");
return 0;
}
}
static int handle_ctrl_cmd(char *cmd)
{
#define CTRL_P 16
#define CTRL_N 14
/* initial situation */
if (cmd_head == cmd_tail)
return 0;
switch (*cmd) {
case CTRL_P:
if (cmdptr != cmd_tail)
cmdptr = (cmdptr-1) % KDB_CMD_HISTORY_COUNT;
strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
return 1;
case CTRL_N:
if (cmdptr != cmd_head)
cmdptr = (cmdptr+1) % KDB_CMD_HISTORY_COUNT;
strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
return 1;
}
return 0;
}
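/*
 * CTRL_P (0x10) and CTRL_N (0x0e) are the control codes produced by
 * Ctrl-P and Ctrl-N; they move cmdptr backwards or forwards through the
 * circular cmd_hist[] buffer and copy the selected entry into cmd_cur,
 * so kdb_local() re-presents it at the prompt for editing, much like
 * shell history recall.
 */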
/*
* kdb_reboot - This function implements the 'reboot' command. Reboot
* the system immediately, or loop forever on failure.
*/
static int kdb_reboot(int argc, const char **argv)
{
emergency_restart();
kdb_printf("Hmm, kdb_reboot did not reboot, spinning here\n");
while (1)
cpu_relax();
/* NOTREACHED */
return 0;
}
static void kdb_dumpregs(struct pt_regs *regs)
{
int old_lvl = console_loglevel;
console_loglevel = 15;
kdb_trap_printk++;
show_regs(regs);
kdb_trap_printk--;
kdb_printf("\n");
console_loglevel = old_lvl;
}
void kdb_set_current_task(struct task_struct *p)
{
kdb_current_task = p;
if (kdb_task_has_cpu(p)) {
kdb_current_regs = KDB_TSKREGS(kdb_process_cpu(p));
return;
}
kdb_current_regs = NULL;
}
/*
* kdb_local - The main code for kdb. This routine is invoked on a
* specific processor, it is not global. The main kdb() routine
* ensures that only one processor at a time is in this routine.
* This code is called with the real reason code on the first
* entry to a kdb session, thereafter it is called with reason
* SWITCH, even if the user goes back to the original cpu.
* Inputs:
* reason The reason KDB was invoked
* error The hardware-defined error code
* regs The exception frame at time of fault/breakpoint.
* db_result Result code from the break or debug point.
* Returns:
* 0 KDB was invoked for an event for which it wasn't responsible
* 1 KDB handled the event for which it was invoked.
* KDB_CMD_GO User typed 'go'.
* KDB_CMD_CPU User switched to another cpu.
* KDB_CMD_SS Single step.
* KDB_CMD_SSB Single step until branch.
*/
static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
kdb_dbtrap_t db_result)
{
char *cmdbuf;
int diag;
struct task_struct *kdb_current =
kdb_curr_task(raw_smp_processor_id());
KDB_DEBUG_STATE("kdb_local 1", reason);
kdb_go_count = 0;
if (reason == KDB_REASON_DEBUG) {
/* special case below */
} else {
kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
kdb_current, kdb_current ? kdb_current->pid : 0);
#if defined(CONFIG_SMP)
kdb_printf("on processor %d ", raw_smp_processor_id());
#endif
}
switch (reason) {
case KDB_REASON_DEBUG:
{
/*
* If re-entering kdb after a single step
* command, don't print the message.
*/
switch (db_result) {
case KDB_DB_BPT:
kdb_printf("\nEntering kdb (0x%p, pid %d) ",
kdb_current, kdb_current->pid);
#if defined(CONFIG_SMP)
kdb_printf("on processor %d ", raw_smp_processor_id());
#endif
kdb_printf("due to Debug @ " kdb_machreg_fmt "\n",
instruction_pointer(regs));
break;
case KDB_DB_SSB:
/*
* In the midst of ssb command. Just return.
*/
KDB_DEBUG_STATE("kdb_local 3", reason);
return KDB_CMD_SSB; /* Continue with SSB command */
break;
case KDB_DB_SS:
break;
case KDB_DB_SSBPT:
KDB_DEBUG_STATE("kdb_local 4", reason);
return 1; /* kdba_db_trap did the work */
default:
kdb_printf("kdb: Bad result from kdba_db_trap: %d\n",
db_result);
break;
}
}
break;
case KDB_REASON_ENTER:
if (KDB_STATE(KEYBOARD))
kdb_printf("due to Keyboard Entry\n");
else
kdb_printf("due to KDB_ENTER()\n");
break;
case KDB_REASON_KEYBOARD:
KDB_STATE_SET(KEYBOARD);
kdb_printf("due to Keyboard Entry\n");
break;
case KDB_REASON_ENTER_SLAVE:
/* drop through, slaves only get released via cpu switch */
case KDB_REASON_SWITCH:
kdb_printf("due to cpu switch\n");
break;
case KDB_REASON_OOPS:
kdb_printf("Oops: %s\n", kdb_diemsg);
kdb_printf("due to oops @ " kdb_machreg_fmt "\n",
instruction_pointer(regs));
kdb_dumpregs(regs);
break;
case KDB_REASON_NMI:
kdb_printf("due to NonMaskable Interrupt @ "
kdb_machreg_fmt "\n",
instruction_pointer(regs));
kdb_dumpregs(regs);
break;
case KDB_REASON_SSTEP:
case KDB_REASON_BREAK:
kdb_printf("due to %s @ " kdb_machreg_fmt "\n",
reason == KDB_REASON_BREAK ?
"Breakpoint" : "SS trap", instruction_pointer(regs));
/*
* Determine if this breakpoint is one that we
* are interested in.
*/
if (db_result != KDB_DB_BPT) {
kdb_printf("kdb: error return from kdba_bp_trap: %d\n",
db_result);
KDB_DEBUG_STATE("kdb_local 6", reason);
return 0; /* Not for us, dismiss it */
}
break;
case KDB_REASON_RECURSE:
kdb_printf("due to Recursion @ " kdb_machreg_fmt "\n",
instruction_pointer(regs));
break;
default:
kdb_printf("kdb: unexpected reason code: %d\n", reason);
KDB_DEBUG_STATE("kdb_local 8", reason);
return 0; /* Not for us, dismiss it */
}
while (1) {
/*
* Initialize pager context.
*/
kdb_nextline = 1;
KDB_STATE_CLEAR(SUPPRESS);
cmdbuf = cmd_cur;
*cmdbuf = '\0';
*(cmd_hist[cmd_head]) = '\0';
if (KDB_FLAG(ONLY_DO_DUMP)) {
/* kdb is off but a catastrophic error requires a dump.
* Take the dump and reboot.
* Turn on logging so the kdb output appears in the log
* buffer in the dump.
*/
const char *setargs[] = { "set", "LOGGING", "1" };
kdb_set(2, setargs);
kdb_reboot(0, NULL);
/*NOTREACHED*/
}
do_full_getstr:
#if defined(CONFIG_SMP)
snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
raw_smp_processor_id());
#else
snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"));
#endif
if (defcmd_in_progress)
strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
/*
* Fetch command from keyboard
*/
cmdbuf = kdb_getstr(cmdbuf, CMD_BUFLEN, kdb_prompt_str);
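/*
 * Command history bookkeeping (summary of the block below): a leading
 * control character (< 32) is handed to handle_ctrl_cmd() -- typically
 * history navigation -- and the prompt is re-issued; a non-empty
 * command line is copied into the circular cmd_hist[] ring of
 * KDB_CMD_HISTORY_COUNT entries before being parsed.
 */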
if (*cmdbuf != '\n') {
if (*cmdbuf < 32) {
if (cmdptr == cmd_head) {
strncpy(cmd_hist[cmd_head], cmd_cur,
CMD_BUFLEN);
*(cmd_hist[cmd_head] +
strlen(cmd_hist[cmd_head])-1) = '\0';
}
if (!handle_ctrl_cmd(cmdbuf))
*(cmd_cur+strlen(cmd_cur)-1) = '\0';
cmdbuf = cmd_cur;
goto do_full_getstr;
} else {
strncpy(cmd_hist[cmd_head], cmd_cur,
CMD_BUFLEN);
}
cmd_head = (cmd_head+1) % KDB_CMD_HISTORY_COUNT;
if (cmd_head == cmd_tail)
cmd_tail = (cmd_tail+1) % KDB_CMD_HISTORY_COUNT;
}
cmdptr = cmd_head;
diag = kdb_parse(cmdbuf);
if (diag == KDB_NOTFOUND) {
kdb_printf("Unknown kdb command: '%s'\n", cmdbuf);
diag = 0;
}
if (diag == KDB_CMD_GO
|| diag == KDB_CMD_CPU
|| diag == KDB_CMD_SS
|| diag == KDB_CMD_SSB
|| diag == KDB_CMD_KGDB)
break;
if (diag)
kdb_cmderror(diag);
}
KDB_DEBUG_STATE("kdb_local 9", diag);
return diag;
}
/*
* kdb_print_state - Print the state data for the current processor
* for debugging.
* Inputs:
* text Identifies the debug point
* value Any integer value to be printed, e.g. reason code.
*/
void kdb_print_state(const char *text, int value)
{
kdb_printf("state: %s cpu %d value %d initial %d state %x\n",
text, raw_smp_processor_id(), value, kdb_initial_cpu,
kdb_state);
}
/*
* kdb_main_loop - After initial setup and assignment of the
* controlling cpu, all cpus are in this loop. One cpu is in
* control and will issue the kdb prompt, the others will spin
* until 'go' or cpu switch.
*
* To get a consistent view of the kernel stacks for all
* processes, this routine is invoked from the main kdb code via
* an architecture specific routine. kdba_main_loop is
* responsible for making the kernel stacks consistent for all
* processes, there should be no difference between a blocked
* process and a running process as far as kdb is concerned.
* Inputs:
* reason The reason KDB was invoked
* error The hardware-defined error code
* reason2 kdb's current reason code.
* Initially error but can change
* according to kdb state.
* db_result Result code from break or debug point.
* regs The exception frame at time of fault/breakpoint.
* should always be valid.
* Returns:
* 0 KDB was invoked for an event for which it wasn't responsible
* 1 KDB handled the event for which it was invoked.
*/
int kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
kdb_dbtrap_t db_result, struct pt_regs *regs)
{
int result = 1;
/* Stay in kdb() until 'go', 'ss[b]' or an error */
while (1) {
/*
* All processors except the one that is in control
* will spin here.
*/
KDB_DEBUG_STATE("kdb_main_loop 1", reason);
while (KDB_STATE(HOLD_CPU)) {
/* state KDB is turned off by kdb_cpu to see if the
* other cpus are still live, each cpu in this loop
* turns it back on.
*/
if (!KDB_STATE(KDB))
KDB_STATE_SET(KDB);
}
KDB_STATE_CLEAR(SUPPRESS);
KDB_DEBUG_STATE("kdb_main_loop 2", reason);
if (KDB_STATE(LEAVING))
break; /* Another cpu said 'go' */
/* Still using kdb, this processor is in control */
result = kdb_local(reason2, error, regs, db_result);
KDB_DEBUG_STATE("kdb_main_loop 3", result);
if (result == KDB_CMD_CPU)
break;
if (result == KDB_CMD_SS) {
KDB_STATE_SET(DOING_SS);
break;
}
if (result == KDB_CMD_SSB) {
KDB_STATE_SET(DOING_SS);
KDB_STATE_SET(DOING_SSB);
break;
}
if (result == KDB_CMD_KGDB) {
if (!KDB_STATE(DOING_KGDB))
kdb_printf("Entering please attach debugger "
"or use $D#44+ or $3#33\n");
break;
}
if (result && result != 1 && result != KDB_CMD_GO)
kdb_printf("\nUnexpected kdb_local return code %d\n",
result);
KDB_DEBUG_STATE("kdb_main_loop 4", reason);
break;
}
if (KDB_STATE(DOING_SS))
KDB_STATE_CLEAR(SSBPT);
/* Clean up any keyboard devices before leaving */
kdb_kbd_cleanup_state();
return result;
}
/*
* kdb_mdr - This function implements the guts of the 'mdr', memory
* read command.
* mdr <addr arg>,<byte count>
* Inputs:
* addr Start address
* count Number of bytes
* Returns:
* Always 0. Any errors are detected and printed by kdb_getarea.
*/
static int kdb_mdr(unsigned long addr, unsigned int count)
{
unsigned char c;
while (count--) {
if (kdb_getarea(c, addr))
return 0;
kdb_printf("%02x", c);
addr++;
}
kdb_printf("\n");
return 0;
}
/*
* kdb_md - This function implements the 'md', 'md1', 'md2', 'md4',
* 'md8' 'mdr' and 'mds' commands.
*
* md|mds [<addr arg> [<line count> [<radix>]]]
* mdWcN [<addr arg> [<line count> [<radix>]]]
* where W = is the width (1, 2, 4 or 8) and N is the count.
* e.g., md1c20 reads 20 bytes, 1 at a time.
* mdr <addr arg>,<byte count>
*/
static void kdb_md_line(const char *fmtstr, unsigned long addr,
int symbolic, int nosect, int bytesperword,
int num, int repeat, int phys)
{
/* print just one line of data */
kdb_symtab_t symtab;
char cbuf[32];
char *c = cbuf;
int i;
unsigned long word;
memset(cbuf, '\0', sizeof(cbuf));
if (phys)
kdb_printf("phys " kdb_machreg_fmt0 " ", addr);
else
kdb_printf(kdb_machreg_fmt0 " ", addr);
for (i = 0; i < num && repeat--; i++) {
if (phys) {
if (kdb_getphysword(&word, addr, bytesperword))
break;
} else if (kdb_getword(&word, addr, bytesperword))
break;
kdb_printf(fmtstr, word);
if (symbolic)
kdbnearsym(word, &symtab);
else
memset(&symtab, 0, sizeof(symtab));
if (symtab.sym_name) {
kdb_symbol_print(word, &symtab, 0);
if (!nosect) {
kdb_printf("\n");
kdb_printf(" %s %s "
kdb_machreg_fmt " "
kdb_machreg_fmt " "
kdb_machreg_fmt, symtab.mod_name,
symtab.sec_name, symtab.sec_start,
symtab.sym_start, symtab.sym_end);
}
addr += bytesperword;
} else {
union {
u64 word;
unsigned char c[8];
} wc;
unsigned char *cp;
#ifdef __BIG_ENDIAN
cp = wc.c + 8 - bytesperword;
#else
cp = wc.c;
#endif
wc.word = word;
#define printable_char(c) \
({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })
switch (bytesperword) {
case 8:
*c++ = printable_char(*cp++);
*c++ = printable_char(*cp++);
*c++ = printable_char(*cp++);
*c++ = printable_char(*cp++);
addr += 4;
case 4:
*c++ = printable_char(*cp++);
*c++ = printable_char(*cp++);
addr += 2;
case 2:
*c++ = printable_char(*cp++);
addr++;
case 1:
*c++ = printable_char(*cp++);
addr++;
break;
}
#undef printable_char
}
}
kdb_printf("%*s %s\n", (int)((num-i)*(2*bytesperword + 1)+1),
" ", cbuf);
}
static int kdb_md(int argc, const char **argv)
{
static unsigned long last_addr;
static int last_radix, last_bytesperword, last_repeat;
int radix = 16, mdcount = 8, bytesperword = KDB_WORD_SIZE, repeat;
int nosect = 0;
char fmtchar, fmtstr[64];
unsigned long addr;
unsigned long word;
long offset = 0;
int symbolic = 0;
int valid = 0;
int phys = 0;
kdbgetintenv("MDCOUNT", &mdcount);
kdbgetintenv("RADIX", &radix);
kdbgetintenv("BYTESPERWORD", &bytesperword);
/* Assume 'md <addr>' and start with environment values */
repeat = mdcount * 16 / bytesperword;
if (strcmp(argv[0], "mdr") == 0) {
if (argc != 2)
return KDB_ARGCOUNT;
valid = 1;
} else if (isdigit(argv[0][2])) {
bytesperword = (int)(argv[0][2] - '0');
if (bytesperword == 0) {
bytesperword = last_bytesperword;
if (bytesperword == 0)
bytesperword = 4;
}
last_bytesperword = bytesperword;
repeat = mdcount * 16 / bytesperword;
if (!argv[0][3])
valid = 1;
else if (argv[0][3] == 'c' && argv[0][4]) {
char *p;
repeat = simple_strtoul(argv[0] + 4, &p, 10);
mdcount = ((repeat * bytesperword) + 15) / 16;
valid = !*p;
}
last_repeat = repeat;
} else if (strcmp(argv[0], "md") == 0)
valid = 1;
else if (strcmp(argv[0], "mds") == 0)
valid = 1;
else if (strcmp(argv[0], "mdp") == 0) {
phys = valid = 1;
}
if (!valid)
return KDB_NOTFOUND;
if (argc == 0) {
if (last_addr == 0)
return KDB_ARGCOUNT;
addr = last_addr;
radix = last_radix;
bytesperword = last_bytesperword;
repeat = last_repeat;
mdcount = ((repeat * bytesperword) + 15) / 16;
}
if (argc) {
unsigned long val;
int diag, nextarg = 1;
diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
&offset, NULL);
if (diag)
return diag;
if (argc > nextarg+2)
return KDB_ARGCOUNT;
if (argc >= nextarg) {
diag = kdbgetularg(argv[nextarg], &val);
if (!diag) {
mdcount = (int) val;
repeat = mdcount * 16 / bytesperword;
}
}
if (argc >= nextarg+1) {
diag = kdbgetularg(argv[nextarg+1], &val);
if (!diag)
radix = (int) val;
}
}
if (strcmp(argv[0], "mdr") == 0)
return kdb_mdr(addr, mdcount);
switch (radix) {
case 10:
fmtchar = 'd';
break;
case 16:
fmtchar = 'x';
break;
case 8:
fmtchar = 'o';
break;
default:
return KDB_BADRADIX;
}
last_radix = radix;
if (bytesperword > KDB_WORD_SIZE)
return KDB_BADWIDTH;
switch (bytesperword) {
case 8:
sprintf(fmtstr, "%%16.16l%c ", fmtchar);
break;
case 4:
sprintf(fmtstr, "%%8.8l%c ", fmtchar);
break;
case 2:
sprintf(fmtstr, "%%4.4l%c ", fmtchar);
break;
case 1:
sprintf(fmtstr, "%%2.2l%c ", fmtchar);
break;
default:
return KDB_BADWIDTH;
}
last_repeat = repeat;
last_bytesperword = bytesperword;
if (strcmp(argv[0], "mds") == 0) {
symbolic = 1;
/* Do not save these changes as last_*, they are temporary mds
* overrides.
*/
bytesperword = KDB_WORD_SIZE;
repeat = mdcount;
kdbgetintenv("NOSECT", &nosect);
}
/* Round address down modulo BYTESPERWORD */
addr &= ~(bytesperword-1);
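/*
 * Each pass of the loop below prints one line of output. The inner
 * scan counts how many consecutive words starting at addr read back
 * as zero, so that long runs of zeros can be collapsed into a single
 * "zero suppressed" range instead of being dumped in full.
 */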
while (repeat > 0) {
unsigned long a;
int n, z, num = (symbolic ? 1 : (16 / bytesperword));
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
for (a = addr, z = 0; z < repeat; a += bytesperword, ++z) {
if (phys) {
if (kdb_getphysword(&word, a, bytesperword)
|| word)
break;
} else if (kdb_getword(&word, a, bytesperword) || word)
break;
}
n = min(num, repeat);
kdb_md_line(fmtstr, addr, symbolic, nosect, bytesperword,
num, repeat, phys);
addr += bytesperword * n;
repeat -= n;
z = (z + num - 1) / num;
if (z > 2) {
int s = num * (z-2);
kdb_printf(kdb_machreg_fmt0 "-" kdb_machreg_fmt0
" zero suppressed\n",
addr, addr + bytesperword * s - 1);
addr += bytesperword * s;
repeat -= s;
}
}
last_addr = addr;
return 0;
}
/*
* kdb_mm - This function implements the 'mm' command.
* mm address-expression new-value
* Remarks:
* mm works on machine words, mmW works on bytes.
*/
static int kdb_mm(int argc, const char **argv)
{
int diag;
unsigned long addr;
long offset = 0;
unsigned long contents;
int nextarg;
int width;
if (argv[0][2] && !isdigit(argv[0][2]))
return KDB_NOTFOUND;
if (argc < 2)
return KDB_ARGCOUNT;
nextarg = 1;
diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
if (diag)
return diag;
if (nextarg > argc)
return KDB_ARGCOUNT;
diag = kdbgetaddrarg(argc, argv, &nextarg, &contents, NULL, NULL);
if (diag)
return diag;
if (nextarg != argc + 1)
return KDB_ARGCOUNT;
width = argv[0][2] ? (argv[0][2] - '0') : (KDB_WORD_SIZE);
diag = kdb_putword(addr, contents, width);
if (diag)
return diag;
kdb_printf(kdb_machreg_fmt " = " kdb_machreg_fmt "\n", addr, contents);
return 0;
}
/*
* kdb_go - This function implements the 'go' command.
* go [address-expression]
*/
static int kdb_go(int argc, const char **argv)
{
unsigned long addr;
int diag;
int nextarg;
long offset;
if (raw_smp_processor_id() != kdb_initial_cpu) {
kdb_printf("go must execute on the entry cpu, "
"please use \"cpu %d\" and then execute go\n",
kdb_initial_cpu);
return KDB_BADCPUNUM;
}
if (argc == 1) {
nextarg = 1;
diag = kdbgetaddrarg(argc, argv, &nextarg,
&addr, &offset, NULL);
if (diag)
return diag;
} else if (argc) {
return KDB_ARGCOUNT;
}
diag = KDB_CMD_GO;
if (KDB_FLAG(CATASTROPHIC)) {
kdb_printf("Catastrophic error detected\n");
kdb_printf("kdb_continue_catastrophic=%d, ",
kdb_continue_catastrophic);
if (kdb_continue_catastrophic == 0 && kdb_go_count++ == 0) {
kdb_printf("type go a second time if you really want "
"to continue\n");
return 0;
}
if (kdb_continue_catastrophic == 2) {
kdb_printf("forcing reboot\n");
kdb_reboot(0, NULL);
}
kdb_printf("attempting to continue\n");
}
return diag;
}
/*
* kdb_rd - This function implements the 'rd' command.
*/
static int kdb_rd(int argc, const char **argv)
{
int len = kdb_check_regs();
#if DBG_MAX_REG_NUM > 0
int i;
char *rname;
int rsize;
u64 reg64;
u32 reg32;
u16 reg16;
u8 reg8;
if (len)
return len;
for (i = 0; i < DBG_MAX_REG_NUM; i++) {
rsize = dbg_reg_def[i].size * 2;
if (rsize > 16)
rsize = 2;
if (len + strlen(dbg_reg_def[i].name) + 4 + rsize > 80) {
len = 0;
kdb_printf("\n");
}
if (len)
len += kdb_printf(" ");
switch(dbg_reg_def[i].size * 8) {
case 8:
rname = dbg_get_reg(i, &reg8, kdb_current_regs);
if (!rname)
break;
len += kdb_printf("%s: %02x", rname, reg8);
break;
case 16:
rname = dbg_get_reg(i, &reg16, kdb_current_regs);
if (!rname)
break;
len += kdb_printf("%s: %04x", rname, reg16);
break;
case 32:
rname = dbg_get_reg(i, &reg32, kdb_current_regs);
if (!rname)
break;
len += kdb_printf("%s: %08x", rname, reg32);
break;
case 64:
rname = dbg_get_reg(i, &reg64, kdb_current_regs);
if (!rname)
break;
len += kdb_printf("%s: %016llx", rname, reg64);
break;
default:
len += kdb_printf("%s: ??", dbg_reg_def[i].name);
}
}
kdb_printf("\n");
#else
if (len)
return len;
kdb_dumpregs(kdb_current_regs);
#endif
return 0;
}
/*
* kdb_rm - This function implements the 'rm' (register modify) command.
* rm register-name new-contents
* Remarks:
* Allows register modification with the same restrictions as gdb
*/
static int kdb_rm(int argc, const char **argv)
{
#if DBG_MAX_REG_NUM > 0
int diag;
const char *rname;
int i;
u64 reg64;
u32 reg32;
u16 reg16;
u8 reg8;
if (argc != 2)
return KDB_ARGCOUNT;
/*
* Allow presence or absence of leading '%' symbol.
*/
rname = argv[1];
if (*rname == '%')
rname++;
diag = kdbgetu64arg(argv[2], &reg64);
if (diag)
return diag;
diag = kdb_check_regs();
if (diag)
return diag;
diag = KDB_BADREG;
for (i = 0; i < DBG_MAX_REG_NUM; i++) {
if (strcmp(rname, dbg_reg_def[i].name) == 0) {
diag = 0;
break;
}
}
if (!diag) {
switch(dbg_reg_def[i].size * 8) {
case 8:
reg8 = reg64;
dbg_set_reg(i, &reg8, kdb_current_regs);
break;
case 16:
reg16 = reg64;
dbg_set_reg(i, &reg16, kdb_current_regs);
break;
case 32:
reg32 = reg64;
dbg_set_reg(i, &reg32, kdb_current_regs);
break;
case 64:
dbg_set_reg(i, &reg64, kdb_current_regs);
break;
}
}
return diag;
#else
kdb_printf("ERROR: Register set currently not implemented\n");
return 0;
#endif
}
#if defined(CONFIG_MAGIC_SYSRQ)
/*
* kdb_sr - This function implements the 'sr' (SYSRQ key) command
* which interfaces to the soi-disant MAGIC SYSRQ functionality.
* sr <magic-sysrq-code>
*/
static int kdb_sr(int argc, const char **argv)
{
if (argc != 1)
return KDB_ARGCOUNT;
kdb_trap_printk++;
__handle_sysrq(*argv[1], false);
kdb_trap_printk--;
return 0;
}
#endif /* CONFIG_MAGIC_SYSRQ */
/*
* kdb_ef - This function implements the 'ef' (display exception
* frame) command. This command takes an address and expects to
* find an exception frame at that address, formats and prints
* it.
* regs address-expression
* Remarks:
* Not done yet.
*/
static int kdb_ef(int argc, const char **argv)
{
int diag;
unsigned long addr;
long offset;
int nextarg;
if (argc != 1)
return KDB_ARGCOUNT;
nextarg = 1;
diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
if (diag)
return diag;
show_regs((struct pt_regs *)addr);
return 0;
}
#if defined(CONFIG_MODULES)
/*
* kdb_lsmod - This function implements the 'lsmod' command. Lists
* currently loaded kernel modules.
* Mostly taken from userland lsmod.
*/
static int kdb_lsmod(int argc, const char **argv)
{
struct module *mod;
if (argc != 0)
return KDB_ARGCOUNT;
kdb_printf("Module Size modstruct Used by\n");
list_for_each_entry(mod, kdb_modules, list) {
kdb_printf("%-20s%8u 0x%p ", mod->name,
mod->core_size, (void *)mod);
#ifdef CONFIG_MODULE_UNLOAD
kdb_printf("%4ld ", module_refcount(mod));
#endif
if (mod->state == MODULE_STATE_GOING)
kdb_printf(" (Unloading)");
else if (mod->state == MODULE_STATE_COMING)
kdb_printf(" (Loading)");
else
kdb_printf(" (Live)");
kdb_printf(" 0x%p", mod->module_core);
#ifdef CONFIG_MODULE_UNLOAD
{
struct module_use *use;
kdb_printf(" [ ");
list_for_each_entry(use, &mod->source_list,
source_list)
kdb_printf("%s ", use->target->name);
kdb_printf("]\n");
}
#endif
}
return 0;
}
#endif /* CONFIG_MODULES */
/*
* kdb_env - This function implements the 'env' command. Display the
* current environment variables.
*/
static int kdb_env(int argc, const char **argv)
{
int i;
for (i = 0; i < __nenv; i++) {
if (__env[i])
kdb_printf("%s\n", __env[i]);
}
if (KDB_DEBUG(MASK))
kdb_printf("KDBFLAGS=0x%x\n", kdb_flags);
return 0;
}
#ifdef CONFIG_PRINTK
/*
* kdb_dmesg - This function implements the 'dmesg' command to display
* the contents of the syslog buffer.
* dmesg [lines] [adjust]
*/
static int kdb_dmesg(int argc, const char **argv)
{
char *syslog_data[4], *start, *end, c = '\0', *p;
int diag, logging, logsize, lines = 0, adjust = 0, n;
if (argc > 2)
return KDB_ARGCOUNT;
if (argc) {
char *cp;
lines = simple_strtol(argv[1], &cp, 0);
if (*cp)
lines = 0;
if (argc > 1) {
adjust = simple_strtoul(argv[2], &cp, 0);
if (*cp || adjust < 0)
adjust = 0;
}
}
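/*
 * Argument semantics as implemented below: a positive 'lines' prints
 * that many of the most recent lines, a negative value prints the
 * oldest |lines| lines, and 'adjust' first skips that many lines from
 * the chosen end of the buffer.
 */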
/* disable LOGGING if set */
diag = kdbgetintenv("LOGGING", &logging);
if (!diag && logging) {
const char *setargs[] = { "set", "LOGGING", "0" };
kdb_set(2, setargs);
}
/* syslog_data[0,1] physical start, end+1. syslog_data[2,3]
* logical start, end+1. */
kdb_syslog_data(syslog_data);
if (syslog_data[2] == syslog_data[3])
return 0;
logsize = syslog_data[1] - syslog_data[0];
start = syslog_data[2];
end = syslog_data[3];
#define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0])
for (n = 0, p = start; p < end; ++p) {
c = *KDB_WRAP(p);
if (c == '\n')
++n;
}
if (c != '\n')
++n;
if (lines < 0) {
if (adjust >= n)
kdb_printf("buffer only contains %d lines, nothing "
"printed\n", n);
else if (adjust - lines >= n)
kdb_printf("buffer only contains %d lines, last %d "
"lines printed\n", n, n - adjust);
if (adjust) {
for (; start < end && adjust; ++start) {
if (*KDB_WRAP(start) == '\n')
--adjust;
}
if (start < end)
++start;
}
for (p = start; p < end && lines; ++p) {
if (*KDB_WRAP(p) == '\n')
++lines;
}
end = p;
} else if (lines > 0) {
int skip = n - (adjust + lines);
if (adjust >= n) {
kdb_printf("buffer only contains %d lines, "
"nothing printed\n", n);
skip = n;
} else if (skip < 0) {
lines += skip;
skip = 0;
kdb_printf("buffer only contains %d lines, first "
"%d lines printed\n", n, lines);
}
for (; start < end && skip; ++start) {
if (*KDB_WRAP(start) == '\n')
--skip;
}
for (p = start; p < end && lines; ++p) {
if (*KDB_WRAP(p) == '\n')
--lines;
}
end = p;
}
/* Do a line at a time (max 200 chars) to reduce protocol overhead */
c = '\n';
while (start != end) {
char buf[201];
p = buf;
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
while (start < end && (c = *KDB_WRAP(start)) &&
(p - buf) < sizeof(buf)-1) {
++start;
*p++ = c;
if (c == '\n')
break;
}
*p = '\0';
kdb_printf("%s", buf);
}
if (c != '\n')
kdb_printf("\n");
return 0;
}
#endif /* CONFIG_PRINTK */
/*
* kdb_cpu - This function implements the 'cpu' command.
* cpu [<cpunum>]
* Returns:
* KDB_CMD_CPU for success, a kdb diagnostic if error
*/
static void kdb_cpu_status(void)
{
int i, start_cpu, first_print = 1;
char state, prev_state = '?';
kdb_printf("Currently on cpu %d\n", raw_smp_processor_id());
kdb_printf("Available cpus: ");
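/*
 * CPUs are printed as ranges, grouping consecutive ids that share the
 * same state: ' ' means responding to kdb, 'I' means running the idle
 * task, 'F' means offline.
 */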
for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i)) {
state = 'F'; /* cpu is offline */
} else {
state = ' '; /* cpu is responding to kdb */
if (kdb_task_state_char(KDB_TSK(i)) == 'I')
state = 'I'; /* idle task */
}
if (state != prev_state) {
if (prev_state != '?') {
if (!first_print)
kdb_printf(", ");
first_print = 0;
kdb_printf("%d", start_cpu);
if (start_cpu < i-1)
kdb_printf("-%d", i-1);
if (prev_state != ' ')
kdb_printf("(%c)", prev_state);
}
prev_state = state;
start_cpu = i;
}
}
/* print the trailing cpus, ignoring them if they are all offline */
if (prev_state != 'F') {
if (!first_print)
kdb_printf(", ");
kdb_printf("%d", start_cpu);
if (start_cpu < i-1)
kdb_printf("-%d", i-1);
if (prev_state != ' ')
kdb_printf("(%c)", prev_state);
}
kdb_printf("\n");
}
static int kdb_cpu(int argc, const char **argv)
{
unsigned long cpunum;
int diag;
if (argc == 0) {
kdb_cpu_status();
return 0;
}
if (argc != 1)
return KDB_ARGCOUNT;
diag = kdbgetularg(argv[1], &cpunum);
if (diag)
return diag;
/*
* Validate cpunum
*/
if ((cpunum >= NR_CPUS) || !cpu_online(cpunum))
return KDB_BADCPUNUM;
dbg_switch_cpu = cpunum;
/*
* Switch to other cpu
*/
return KDB_CMD_CPU;
}
/* The user may not realize that ps/bta with no parameters does not print idle
* or sleeping system daemon processes, so tell them how many were suppressed.
*/
void kdb_ps_suppressed(void)
{
int idle = 0, daemon = 0;
unsigned long mask_I = kdb_task_state_string("I"),
mask_M = kdb_task_state_string("M");
unsigned long cpu;
const struct task_struct *p, *g;
for_each_online_cpu(cpu) {
p = kdb_curr_task(cpu);
if (kdb_task_state(p, mask_I))
++idle;
}
kdb_do_each_thread(g, p) {
if (kdb_task_state(p, mask_M))
++daemon;
} kdb_while_each_thread(g, p);
if (idle || daemon) {
if (idle)
kdb_printf("%d idle process%s (state I)%s\n",
idle, idle == 1 ? "" : "es",
daemon ? " and " : "");
if (daemon)
kdb_printf("%d sleeping system daemon (state M) "
"process%s", daemon,
daemon == 1 ? "" : "es");
kdb_printf(" suppressed,\nuse 'ps A' to see all.\n");
}
}
/*
* kdb_ps - This function implements the 'ps' command which shows a
* list of the active processes.
* ps [DRSTCZEUIMA] All processes, optionally filtered by state
*/
void kdb_ps1(const struct task_struct *p)
{
int cpu;
unsigned long tmp;
if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
return;
cpu = kdb_process_cpu(p);
kdb_printf("0x%p %8d %8d %d %4d %c 0x%p %c%s\n",
(void *)p, p->pid, p->parent->pid,
kdb_task_has_cpu(p), kdb_process_cpu(p),
kdb_task_state_char(p),
(void *)(&p->thread),
p == kdb_curr_task(raw_smp_processor_id()) ? '*' : ' ',
p->comm);
if (kdb_task_has_cpu(p)) {
if (!KDB_TSK(cpu)) {
kdb_printf(" Error: no saved data for this cpu\n");
} else {
if (KDB_TSK(cpu) != p)
kdb_printf(" Error: does not match running "
"process table (0x%p)\n", KDB_TSK(cpu));
}
}
}
static int kdb_ps(int argc, const char **argv)
{
struct task_struct *g, *p;
unsigned long mask, cpu;
if (argc == 0)
kdb_ps_suppressed();
kdb_printf("%-*s Pid Parent [*] cpu State %-*s Command\n",
(int)(2*sizeof(void *))+2, "Task Addr",
(int)(2*sizeof(void *))+2, "Thread");
mask = kdb_task_state_string(argc ? argv[1] : NULL);
/* Run the active tasks first */
for_each_online_cpu(cpu) {
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
p = kdb_curr_task(cpu);
if (kdb_task_state(p, mask))
kdb_ps1(p);
}
kdb_printf("\n");
/* Now the real tasks */
kdb_do_each_thread(g, p) {
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
if (kdb_task_state(p, mask))
kdb_ps1(p);
} kdb_while_each_thread(g, p);
return 0;
}
/*
* kdb_pid - This function implements the 'pid' command which switches
* the currently active process.
* pid [<pid> | R]
*/
static int kdb_pid(int argc, const char **argv)
{
struct task_struct *p;
unsigned long val;
int diag;
if (argc > 1)
return KDB_ARGCOUNT;
if (argc) {
if (strcmp(argv[1], "R") == 0) {
p = KDB_TSK(kdb_initial_cpu);
} else {
diag = kdbgetularg(argv[1], &val);
if (diag)
return KDB_BADINT;
p = find_task_by_pid_ns((pid_t)val, &init_pid_ns);
if (!p) {
kdb_printf("No task with pid=%d\n", (pid_t)val);
return 0;
}
}
kdb_set_current_task(p);
}
kdb_printf("KDB current process is %s(pid=%d)\n",
kdb_current_task->comm,
kdb_current_task->pid);
return 0;
}
/*
* kdb_ll - This function implements the 'll' command which follows a
* linked list and executes an arbitrary command for each
* element.
*/
static int kdb_ll(int argc, const char **argv)
{
int diag = 0;
unsigned long addr;
long offset = 0;
unsigned long va;
unsigned long linkoffset;
int nextarg;
const char *command;
if (argc != 3)
return KDB_ARGCOUNT;
nextarg = 1;
diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
if (diag)
return diag;
diag = kdbgetularg(argv[2], &linkoffset);
if (diag)
return diag;
/*
* Using the starting address as
* the first element in the list, and assuming that
* the list ends with a null pointer.
*/
va = addr;
command = kdb_strdup(argv[3], GFP_KDB);
if (!command) {
kdb_printf("%s: cannot duplicate command\n", __func__);
return 0;
}
/* Recursive use of kdb_parse, do not use argv after this point */
argv = NULL;
while (va) {
char buf[80];
if (KDB_FLAG(CMD_INTERRUPT))
goto out;
sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
diag = kdb_parse(buf);
if (diag)
goto out;
addr = va + linkoffset;
if (kdb_getword(&va, addr, sizeof(va)))
goto out;
}
out:
kfree(command);
return diag;
}
static int kdb_kgdb(int argc, const char **argv)
{
return KDB_CMD_KGDB;
}
/*
* kdb_help - This function implements the 'help' and '?' commands.
*/
static int kdb_help(int argc, const char **argv)
{
kdbtab_t *kt;
int i;
kdb_printf("%-15.15s %-20.20s %s\n", "Command", "Usage", "Description");
kdb_printf("-----------------------------"
"-----------------------------\n");
for_each_kdbcmd(kt, i) {
if (kt->cmd_name)
kdb_printf("%-15.15s %-20.20s %s\n", kt->cmd_name,
kt->cmd_usage, kt->cmd_help);
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
}
return 0;
}
/*
* kdb_kill - This function implements the 'kill' commands.
*/
static int kdb_kill(int argc, const char **argv)
{
long sig, pid;
char *endp;
struct task_struct *p;
struct siginfo info;
if (argc != 2)
return KDB_ARGCOUNT;
sig = simple_strtol(argv[1], &endp, 0);
if (*endp)
return KDB_BADINT;
if (sig >= 0) {
kdb_printf("Invalid signal parameter.<-signal>\n");
return 0;
}
sig = -sig;
pid = simple_strtol(argv[2], &endp, 0);
if (*endp)
return KDB_BADINT;
if (pid <= 0) {
kdb_printf("Process ID must be larger than 0.\n");
return 0;
}
/* Find the process. */
p = find_task_by_pid_ns(pid, &init_pid_ns);
if (!p) {
kdb_printf("The specified process isn't found.\n");
return 0;
}
p = p->group_leader;
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_USER;
info.si_pid = pid; /* same capabilities as process being signalled */
info.si_uid = 0; /* kdb has root authority */
kdb_send_sig_info(p, &info);
return 0;
}
struct kdb_tm {
int tm_sec; /* seconds */
int tm_min; /* minutes */
int tm_hour; /* hours */
int tm_mday; /* day of the month */
int tm_mon; /* month */
int tm_year; /* year */
};
static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm)
{
/* This will work from 1970-2099, 2100 is not a leap year */
static int mon_day[] = { 31, 29, 31, 30, 31, 30, 31,
31, 30, 31, 30, 31 };
memset(tm, 0, sizeof(*tm));
tm->tm_sec = tv->tv_sec % (24 * 60 * 60);
tm->tm_mday = tv->tv_sec / (24 * 60 * 60) +
(2 * 365 + 1); /* shift base from 1970 to 1968 */
tm->tm_min = tm->tm_sec / 60 % 60;
tm->tm_hour = tm->tm_sec / 60 / 60;
tm->tm_sec = tm->tm_sec % 60;
tm->tm_year = 68 + 4*(tm->tm_mday / (4*365+1));
tm->tm_mday %= (4*365+1);
mon_day[1] = 29;
while (tm->tm_mday >= mon_day[tm->tm_mon]) {
tm->tm_mday -= mon_day[tm->tm_mon];
if (++tm->tm_mon == 12) {
tm->tm_mon = 0;
++tm->tm_year;
mon_day[1] = 28;
}
}
++tm->tm_mday;
}
/*
* Most of this code has been lifted from kernel/timer.c::sys_sysinfo().
* I cannot call that code directly from kdb, it has an unconditional
* cli()/sti() and calls routines that take locks which can stop the debugger.
*/
static void kdb_sysinfo(struct sysinfo *val)
{
struct timespec uptime;
do_posix_clock_monotonic_gettime(&uptime);
memset(val, 0, sizeof(*val));
val->uptime = uptime.tv_sec;
val->loads[0] = avenrun[0];
val->loads[1] = avenrun[1];
val->loads[2] = avenrun[2];
val->procs = nr_threads-1;
si_meminfo(val);
return;
}
/*
* kdb_summary - This function implements the 'summary' command.
*/
static int kdb_summary(int argc, const char **argv)
{
struct timespec now;
struct kdb_tm tm;
struct sysinfo val;
if (argc)
return KDB_ARGCOUNT;
kdb_printf("sysname %s\n", init_uts_ns.name.sysname);
kdb_printf("release %s\n", init_uts_ns.name.release);
kdb_printf("version %s\n", init_uts_ns.name.version);
kdb_printf("machine %s\n", init_uts_ns.name.machine);
kdb_printf("nodename %s\n", init_uts_ns.name.nodename);
kdb_printf("domainname %s\n", init_uts_ns.name.domainname);
kdb_printf("ccversion %s\n", __stringify(CCVERSION));
now = __current_kernel_time();
kdb_gmtime(&now, &tm);
kdb_printf("date %04d-%02d-%02d %02d:%02d:%02d "
"tz_minuteswest %d\n",
1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec,
sys_tz.tz_minuteswest);
kdb_sysinfo(&val);
kdb_printf("uptime ");
if (val.uptime > (24*60*60)) {
int days = val.uptime / (24*60*60);
val.uptime %= (24*60*60);
kdb_printf("%d day%s ", days, days == 1 ? "" : "s");
}
kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60);
/* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
kdb_printf("load avg %ld.%02ld %ld.%02ld %ld.%02ld\n",
LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]),
LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]),
LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2]));
#undef LOAD_INT
#undef LOAD_FRAC
/* Display in kilobytes */
#define K(x) ((x) << (PAGE_SHIFT - 10))
kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n"
"Buffers: %8lu kB\n",
val.totalram, val.freeram, val.bufferram);
return 0;
}
/*
* kdb_per_cpu - This function implements the 'per_cpu' command.
*/
static int kdb_per_cpu(int argc, const char **argv)
{
char fmtstr[64];
int cpu, diag, nextarg = 1;
unsigned long addr, symaddr, val, bytesperword = 0, whichcpu = ~0UL;
if (argc < 1 || argc > 3)
return KDB_ARGCOUNT;
diag = kdbgetaddrarg(argc, argv, &nextarg, &symaddr, NULL, NULL);
if (diag)
return diag;
if (argc >= 2) {
diag = kdbgetularg(argv[2], &bytesperword);
if (diag)
return diag;
}
if (!bytesperword)
bytesperword = KDB_WORD_SIZE;
else if (bytesperword > KDB_WORD_SIZE)
return KDB_BADWIDTH;
sprintf(fmtstr, "%%0%dlx ", (int)(2*bytesperword));
if (argc >= 3) {
diag = kdbgetularg(argv[3], &whichcpu);
if (diag)
return diag;
if (!cpu_online(whichcpu)) {
kdb_printf("cpu %ld is not online\n", whichcpu);
return KDB_BADCPUNUM;
}
}
/* Most architectures use __per_cpu_offset[cpu], some use
* __per_cpu_offset(cpu), and non-SMP builds have no __per_cpu_offset.
*/
#ifdef __per_cpu_offset
#define KDB_PCU(cpu) __per_cpu_offset(cpu)
#else
#ifdef CONFIG_SMP
#define KDB_PCU(cpu) __per_cpu_offset[cpu]
#else
#define KDB_PCU(cpu) 0
#endif
#endif
for_each_online_cpu(cpu) {
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
if (whichcpu != ~0UL && whichcpu != cpu)
continue;
addr = symaddr + KDB_PCU(cpu);
diag = kdb_getword(&val, addr, bytesperword);
if (diag) {
kdb_printf("%5d " kdb_bfd_vma_fmt0 " - unable to "
"read, diag=%d\n", cpu, addr, diag);
continue;
}
kdb_printf("%5d ", cpu);
kdb_md_line(fmtstr, addr,
bytesperword == KDB_WORD_SIZE,
1, bytesperword, 1, 1, 0);
}
#undef KDB_PCU
return 0;
}
/*
* display help for the use of cmd | grep pattern
*/
static int kdb_grep_help(int argc, const char **argv)
{
kdb_printf("Usage of cmd args | grep pattern:\n");
kdb_printf(" Any command's output may be filtered through an ");
kdb_printf("emulated 'pipe'.\n");
kdb_printf(" 'grep' is just a key word.\n");
kdb_printf(" The pattern may include a very limited set of "
"metacharacters:\n");
kdb_printf(" pattern or ^pattern or pattern$ or ^pattern$\n");
kdb_printf(" And if there are spaces in the pattern, you may "
"quote it:\n");
kdb_printf(" \"pat tern\" or \"^pat tern\" or \"pat tern$\""
" or \"^pat tern$\"\n");
return 0;
}
/*
* kdb_register_repeat - This function is used to register a kernel
* debugger command.
* Inputs:
* cmd Command name
* func Function to execute the command
* usage A simple usage string showing arguments
* help A simple help string describing command
* repeat Does the command auto repeat on enter?
* Returns:
* zero for success, one if a duplicate command.
*/
#define kdb_command_extend 50 /* arbitrary */
int kdb_register_repeat(char *cmd,
kdb_func_t func,
char *usage,
char *help,
short minlen,
kdb_repeat_t repeat)
{
int i;
kdbtab_t *kp;
/*
* Brute force method to determine duplicates
*/
for_each_kdbcmd(kp, i) {
if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
kdb_printf("Duplicate kdb command registered: "
"%s, func %p help %s\n", cmd, func, help);
return 1;
}
}
/*
* Insert command into first available location in table
*/
for_each_kdbcmd(kp, i) {
if (kp->cmd_name == NULL)
break;
}
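/*
 * No free slot: grow the dynamic part of the command table by
 * kdb_command_extend entries. Only the entries beyond the static
 * KDB_BASE_CMD_MAX table live in kdb_commands, so that is what gets
 * reallocated and copied here.
 */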
if (i >= kdb_max_commands) {
kdbtab_t *new = kmalloc((kdb_max_commands - KDB_BASE_CMD_MAX +
kdb_command_extend) * sizeof(*new), GFP_KDB);
if (!new) {
kdb_printf("Could not allocate new kdb_command "
"table\n");
return 1;
}
if (kdb_commands) {
memcpy(new, kdb_commands,
(kdb_max_commands - KDB_BASE_CMD_MAX) * sizeof(*new));
kfree(kdb_commands);
}
memset(new + kdb_max_commands - KDB_BASE_CMD_MAX, 0,
kdb_command_extend * sizeof(*new));
kdb_commands = new;
kp = kdb_commands + kdb_max_commands - KDB_BASE_CMD_MAX;
kdb_max_commands += kdb_command_extend;
}
kp->cmd_name = cmd;
kp->cmd_func = func;
kp->cmd_usage = usage;
kp->cmd_help = help;
kp->cmd_flags = 0;
kp->cmd_minlen = minlen;
kp->cmd_repeat = repeat;
return 0;
}
EXPORT_SYMBOL_GPL(kdb_register_repeat);
/*
* kdb_register - Compatibility register function for commands that do
* not need to specify a repeat state. Equivalent to
* kdb_register_repeat with KDB_REPEAT_NONE.
* Inputs:
* cmd Command name
* func Function to execute the command
* usage A simple usage string showing arguments
* help A simple help string describing command
* Returns:
* zero for success, one if a duplicate command.
*/
int kdb_register(char *cmd,
kdb_func_t func,
char *usage,
char *help,
short minlen)
{
return kdb_register_repeat(cmd, func, usage, help, minlen,
KDB_REPEAT_NONE);
}
EXPORT_SYMBOL_GPL(kdb_register);
/*
* kdb_unregister - This function is used to unregister a kernel
* debugger command. It is generally called when a module which
* implements kdb commands is unloaded.
* Inputs:
* cmd Command name
* Returns:
* zero for success, one if the command was not registered.
*/
int kdb_unregister(char *cmd)
{
int i;
kdbtab_t *kp;
/*
* find the command.
*/
for_each_kdbcmd(kp, i) {
if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
kp->cmd_name = NULL;
return 0;
}
}
/* Couldn't find it. */
return 1;
}
EXPORT_SYMBOL_GPL(kdb_unregister);
/* Initialize the kdb command table. */
static void __init kdb_inittab(void)
{
int i;
kdbtab_t *kp;
for_each_kdbcmd(kp, i)
kp->cmd_name = NULL;
kdb_register_repeat("md", kdb_md, "<vaddr>",
"Display Memory Contents, also mdWcN, e.g. md8c1", 1,
KDB_REPEAT_NO_ARGS);
kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
"Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
"Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
kdb_register_repeat("mds", kdb_md, "<vaddr>",
"Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
"Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
kdb_register_repeat("go", kdb_go, "[<vaddr>]",
"Continue Execution", 1, KDB_REPEAT_NONE);
kdb_register_repeat("rd", kdb_rd, "",
"Display Registers", 0, KDB_REPEAT_NONE);
kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
"Modify Registers", 0, KDB_REPEAT_NONE);
kdb_register_repeat("ef", kdb_ef, "<vaddr>",
"Display exception frame", 0, KDB_REPEAT_NONE);
kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
"Stack traceback", 1, KDB_REPEAT_NONE);
kdb_register_repeat("btp", kdb_bt, "<pid>",
"Display stack for process <pid>", 0, KDB_REPEAT_NONE);
kdb_register_repeat("bta", kdb_bt, "[DRSTCZEUIMA]",
"Display stack for all processes", 0, KDB_REPEAT_NONE);
kdb_register_repeat("btc", kdb_bt, "",
"Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
kdb_register_repeat("btt", kdb_bt, "<vaddr>",
"Backtrace process given its struct task address", 0,
KDB_REPEAT_NONE);
kdb_register_repeat("ll", kdb_ll, "<first-element> <linkoffset> <cmd>",
"Execute cmd for each element in linked list", 0, KDB_REPEAT_NONE);
kdb_register_repeat("env", kdb_env, "",
"Show environment variables", 0, KDB_REPEAT_NONE);
kdb_register_repeat("set", kdb_set, "",
"Set environment variables", 0, KDB_REPEAT_NONE);
kdb_register_repeat("help", kdb_help, "",
"Display Help Message", 1, KDB_REPEAT_NONE);
kdb_register_repeat("?", kdb_help, "",
"Display Help Message", 0, KDB_REPEAT_NONE);
kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
"Switch to new cpu", 0, KDB_REPEAT_NONE);
kdb_register_repeat("kgdb", kdb_kgdb, "",
"Enter kgdb mode", 0, KDB_REPEAT_NONE);
kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
"Display active task list", 0, KDB_REPEAT_NONE);
kdb_register_repeat("pid", kdb_pid, "<pidnum>",
"Switch to another task", 0, KDB_REPEAT_NONE);
kdb_register_repeat("reboot", kdb_reboot, "",
"Reboot the machine immediately", 0, KDB_REPEAT_NONE);
#if defined(CONFIG_MODULES)
kdb_register_repeat("lsmod", kdb_lsmod, "",
"List loaded kernel modules", 0, KDB_REPEAT_NONE);
#endif
#if defined(CONFIG_MAGIC_SYSRQ)
kdb_register_repeat("sr", kdb_sr, "<key>",
"Magic SysRq key", 0, KDB_REPEAT_NONE);
#endif
#if defined(CONFIG_PRINTK)
kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
"Display syslog buffer", 0, KDB_REPEAT_NONE);
#endif
kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
"Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
"Send a signal to a process", 0, KDB_REPEAT_NONE);
kdb_register_repeat("summary", kdb_summary, "",
"Summarize the system", 4, KDB_REPEAT_NONE);
kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
"Display per_cpu variables", 3, KDB_REPEAT_NONE);
kdb_register_repeat("grephelp", kdb_grep_help, "",
"Display help on | grep", 0, KDB_REPEAT_NONE);
}
/* Execute any commands defined in kdb_cmds. */
static void __init kdb_cmd_init(void)
{
int i, diag;
for (i = 0; kdb_cmds[i]; ++i) {
diag = kdb_parse(kdb_cmds[i]);
if (diag)
kdb_printf("kdb command %s failed, kdb diag %d\n",
kdb_cmds[i], diag);
}
if (defcmd_in_progress) {
kdb_printf("Incomplete 'defcmd' set, forcing endefcmd\n");
kdb_parse("endefcmd");
}
}
/* Initialize kdb_printf, breakpoint tables and kdb state */
void __init kdb_init(int lvl)
{
static int kdb_init_lvl = KDB_NOT_INITIALIZED;
int i;
if (kdb_init_lvl == KDB_INIT_FULL || lvl <= kdb_init_lvl)
return;
for (i = kdb_init_lvl; i < lvl; i++) {
switch (i) {
case KDB_NOT_INITIALIZED:
kdb_inittab(); /* Initialize Command Table */
kdb_initbptab(); /* Initialize Breakpoints */
break;
case KDB_INIT_EARLY:
kdb_cmd_init(); /* Build kdb_cmds tables */
break;
}
}
kdb_init_lvl = lvl;
}
| gpl-2.0 |
wujichang/linux | arch/um/drivers/chan_user.c | 4564 | 7115 | /*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
* Licensed under the GPL
*/
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sched.h>
#include <signal.h>
#include <termios.h>
#include <sys/ioctl.h>
#include "chan_user.h"
#include <os.h>
#include <um_malloc.h>
void generic_close(int fd, void *unused)
{
close(fd);
}
int generic_read(int fd, char *c_out, void *unused)
{
int n;
n = read(fd, c_out, sizeof(*c_out));
if (n > 0)
return n;
else if (errno == EAGAIN)
return 0;
else if (n == 0)
return -EIO;
return -errno;
}
/* XXX Trivial wrapper around write */
int generic_write(int fd, const char *buf, int n, void *unused)
{
int err;
err = write(fd, buf, n);
if (err > 0)
return err;
else if (errno == EAGAIN)
return 0;
else if (err == 0)
return -EIO;
return -errno;
}
int generic_window_size(int fd, void *unused, unsigned short *rows_out,
unsigned short *cols_out)
{
struct winsize size;
int ret;
if (ioctl(fd, TIOCGWINSZ, &size) < 0)
return -errno;
ret = ((*rows_out != size.ws_row) || (*cols_out != size.ws_col));
*rows_out = size.ws_row;
*cols_out = size.ws_col;
return ret;
}
void generic_free(void *data)
{
kfree(data);
}
int generic_console_write(int fd, const char *buf, int n)
{
sigset_t old, no_sigio;
struct termios save, new;
int err;
if (isatty(fd)) {
sigemptyset(&no_sigio);
sigaddset(&no_sigio, SIGIO);
if (sigprocmask(SIG_BLOCK, &no_sigio, &old))
goto error;
CATCH_EINTR(err = tcgetattr(fd, &save));
if (err)
goto error;
new = save;
/*
* The terminal becomes a bit less raw, to handle \n also as
* "Carriage Return", not only as "New Line". Otherwise, the new
* line won't start at the first column.
*/
new.c_oflag |= OPOST;
CATCH_EINTR(err = tcsetattr(fd, TCSAFLUSH, &new));
if (err)
goto error;
}
err = generic_write(fd, buf, n, NULL);
/*
* Restore raw mode, in any case; we *must* ignore any error apart
* EINTR, except for debug.
*/
if (isatty(fd)) {
CATCH_EINTR(tcsetattr(fd, TCSAFLUSH, &save));
sigprocmask(SIG_SETMASK, &old, NULL);
}
return err;
error:
return -errno;
}
/*
* UML SIGWINCH handling
*
* The point of this is to handle SIGWINCH on consoles which have host
* ttys and relay them inside UML to whatever might be running on the
* console and cares about the window size (since SIGWINCH notifies
* about terminal size changes).
*
* So, we have a separate thread for each host tty attached to a UML
* device (side-issue - I'm annoyed that one thread can't have
* multiple controlling ttys for the purpose of handling SIGWINCH, but
* I imagine there are other reasons that doesn't make any sense).
*
* SIGWINCH can't be received synchronously, so you have to set up to
* receive it as a signal. That being the case, if you are going to
* wait for it, it is convenient to sit in sigsuspend() and wait for
* the signal to bounce you out of it (see below for how we make sure
* to exit only on SIGWINCH).
*/
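/*
 * Flow of the code below: winch_tramp() creates a pipe and starts
 * winch_thread() as a helper thread. The thread writes one byte to
 * signal that it is up, makes the pty its controlling tty, then loops
 * in sigsuspend(), writing a byte to the pipe on every SIGWINCH.
 * register_winch() wires the read end up as an interrupt source via
 * register_winch_irq().
 */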
static void winch_handler(int sig)
{
}
struct winch_data {
int pty_fd;
int pipe_fd;
};
static int winch_thread(void *arg)
{
struct winch_data *data = arg;
sigset_t sigs;
int pty_fd, pipe_fd;
int count;
char c = 1;
pty_fd = data->pty_fd;
pipe_fd = data->pipe_fd;
count = write(pipe_fd, &c, sizeof(c));
if (count != sizeof(c))
printk(UM_KERN_ERR "winch_thread : failed to write "
"synchronization byte, err = %d\n", -count);
/*
* We are not using SIG_IGN on purpose, so don't fix it as I thought to
* do! If using SIG_IGN, the sigsuspend() call below would not stop on
* SIGWINCH.
*/
signal(SIGWINCH, winch_handler);
sigfillset(&sigs);
/* Block all signals possible. */
if (sigprocmask(SIG_SETMASK, &sigs, NULL) < 0) {
printk(UM_KERN_ERR "winch_thread : sigprocmask failed, "
"errno = %d\n", errno);
exit(1);
}
/* In sigsuspend(), block anything else than SIGWINCH. */
sigdelset(&sigs, SIGWINCH);
if (setsid() < 0) {
printk(UM_KERN_ERR "winch_thread : setsid failed, errno = %d\n",
errno);
exit(1);
}
if (ioctl(pty_fd, TIOCSCTTY, 0) < 0) {
printk(UM_KERN_ERR "winch_thread : TIOCSCTTY failed on "
"fd %d err = %d\n", pty_fd, errno);
exit(1);
}
if (tcsetpgrp(pty_fd, os_getpid()) < 0) {
printk(UM_KERN_ERR "winch_thread : tcsetpgrp failed on "
"fd %d err = %d\n", pty_fd, errno);
exit(1);
}
/*
* These are synchronization calls between various UML threads on the
* host - since they are not different kernel threads, we cannot use
* kernel semaphores. We don't use SysV semaphores because they are
* persistent.
*/
count = read(pipe_fd, &c, sizeof(c));
if (count != sizeof(c))
printk(UM_KERN_ERR "winch_thread : failed to read "
"synchronization byte, err = %d\n", errno);
while(1) {
/*
* This will be interrupted by SIGWINCH only, since
* other signals are blocked.
*/
sigsuspend(&sigs);
count = write(pipe_fd, &c, sizeof(c));
if (count != sizeof(c))
printk(UM_KERN_ERR "winch_thread : write failed, "
"err = %d\n", errno);
}
}
static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
unsigned long *stack_out)
{
struct winch_data data;
int fds[2], n, err;
char c;
err = os_pipe(fds, 1, 1);
if (err < 0) {
printk(UM_KERN_ERR "winch_tramp : os_pipe failed, err = %d\n",
-err);
goto out;
}
data = ((struct winch_data) { .pty_fd = fd,
.pipe_fd = fds[1] } );
/*
* CLONE_FILES so this thread doesn't hold open files which are open
* now, but later closed in a different thread. This is a
* problem with /dev/net/tun, which if held open by this
* thread, prevents the TUN/TAP device from being reused.
*/
err = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
if (err < 0) {
printk(UM_KERN_ERR "fork of winch_thread failed - errno = %d\n",
-err);
goto out_close;
}
*fd_out = fds[0];
n = read(fds[0], &c, sizeof(c));
if (n != sizeof(c)) {
printk(UM_KERN_ERR "winch_tramp : failed to read "
"synchronization byte\n");
printk(UM_KERN_ERR "read failed, err = %d\n", errno);
printk(UM_KERN_ERR "fd %d will not support SIGWINCH\n", fd);
err = -EINVAL;
goto out_close;
}
if (os_set_fd_block(*fd_out, 0)) {
printk(UM_KERN_ERR "winch_tramp: failed to set thread_fd "
"non-blocking.\n");
goto out_close;
}
return err;
out_close:
close(fds[1]);
close(fds[0]);
out:
return err;
}
void register_winch(int fd, struct tty_port *port)
{
unsigned long stack;
int pid, thread, count, thread_fd = -1;
char c = 1;
if (!isatty(fd))
return;
pid = tcgetpgrp(fd);
if (is_skas_winch(pid, fd, port)) {
register_winch_irq(-1, fd, -1, port, 0);
return;
}
if (pid == -1) {
thread = winch_tramp(fd, port, &thread_fd, &stack);
if (thread < 0)
return;
register_winch_irq(thread_fd, fd, thread, port, stack);
count = write(thread_fd, &c, sizeof(c));
if (count != sizeof(c))
printk(UM_KERN_ERR "register_winch : failed to write "
"synchronization byte, err = %d\n", errno);
}
}
| gpl-2.0 |
Kra1o5/android_kernel_bq_rk3066 | arch/powerpc/kernel/isa-bridge.c | 4564 | 7423 | /*
* Routines for tracking a legacy ISA bridge
*
* Copyright 2007 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
*
* Some bits and pieces moved over from pci_64.c
*
* Copyright 2003 Anton Blanchard <anton@au.ibm.com>, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define DEBUG
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
unsigned long isa_io_base; /* NULL if no ISA bus */
EXPORT_SYMBOL(isa_io_base);
/* Cached ISA bridge dev. */
static struct device_node *isa_bridge_devnode;
struct pci_dev *isa_bridge_pcidev;
EXPORT_SYMBOL_GPL(isa_bridge_pcidev);
#define ISA_SPACE_MASK 0x1
#define ISA_SPACE_IO 0x1
static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
unsigned long phb_io_base_phys)
{
/* We should get some saner parsing here and remove these structs */
struct pci_address {
u32 a_hi;
u32 a_mid;
u32 a_lo;
};
struct isa_address {
u32 a_hi;
u32 a_lo;
};
struct isa_range {
struct isa_address isa_addr;
struct pci_address pci_addr;
unsigned int size;
};
const struct isa_range *range;
unsigned long pci_addr;
unsigned int isa_addr;
unsigned int size;
int rlen = 0;
range = of_get_property(isa_node, "ranges", &rlen);
if (range == NULL || (rlen < sizeof(struct isa_range)))
goto inval_range;
/* From "ISA Binding to 1275"
* The ranges property is laid out as an array of elements,
* each of which comprises:
* cells 0 - 1: an ISA address
* cells 2 - 4: a PCI address
* (size depending on dev->n_addr_cells)
* cell 5: the size of the range
*/
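/*
 * Purely illustrative example (not taken from a real device tree): a
 * 6-cell entry of { 0x1, 0x0, 0x0, 0x0, 0x0, 0x10000 } would decode
 * as ISA I/O space (a_hi & ISA_SPACE_MASK == ISA_SPACE_IO), ISA
 * address 0, PCI address 0 and size 64K -- exactly the layout the
 * code below expects before it maps the legacy I/O window.
 */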
if ((range->isa_addr.a_hi & ISA_SPACE_MASK) != ISA_SPACE_IO) {
range++;
rlen -= sizeof(struct isa_range);
if (rlen < sizeof(struct isa_range))
goto inval_range;
}
if ((range->isa_addr.a_hi & ISA_SPACE_MASK) != ISA_SPACE_IO)
goto inval_range;
isa_addr = range->isa_addr.a_lo;
pci_addr = (unsigned long) range->pci_addr.a_mid << 32 |
range->pci_addr.a_lo;
/* Assume these are both zero. Note: We could fix that and
* do a proper parsing instead ... oh well, that will do for
* now as nobody uses fancy mappings for ISA bridges
*/
if ((pci_addr != 0) || (isa_addr != 0)) {
printk(KERN_ERR "unexpected isa to pci mapping: %s\n",
__func__);
return;
}
/* Align size and make sure it's cropped to 64K */
size = PAGE_ALIGN(range->size);
if (size > 0x10000)
size = 0x10000;
__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
size, _PAGE_NO_CACHE|_PAGE_GUARDED);
return;
inval_range:
printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
"mapping 64k\n");
__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
0x10000, _PAGE_NO_CACHE|_PAGE_GUARDED);
}
/**
* isa_bridge_find_early - Find and map the ISA IO space early before
* main PCI discovery. This is optionally called by
* the arch code when adding PCI PHBs to get early
* access to ISA IO ports
*/
void __init isa_bridge_find_early(struct pci_controller *hose)
{
struct device_node *np, *parent = NULL, *tmp;
/* If we already have an ISA bridge, bail off */
if (isa_bridge_devnode != NULL)
return;
/* For each "isa" node in the system. Note : we do a search by
* type and not by name. It might be better to do by name but that's
* what the code used to do and I don't want to break too much at
* once. We can look into changing that separately
*/
for_each_node_by_type(np, "isa") {
/* Look for our hose being a parent */
for (parent = of_get_parent(np); parent;) {
if (parent == hose->dn) {
of_node_put(parent);
break;
}
tmp = parent;
parent = of_get_parent(parent);
of_node_put(tmp);
}
if (parent != NULL)
break;
}
if (np == NULL)
return;
isa_bridge_devnode = np;
/* Now parse the "ranges" property and setup the ISA mapping */
pci_process_ISA_OF_ranges(np, hose->io_base_phys);
/* Set the global ISA io base to indicate we have an ISA bridge */
isa_io_base = ISA_IO_BASE;
pr_debug("ISA bridge (early) is %s\n", np->full_name);
}
/**
* isa_bridge_find_late - Find and map the ISA IO space upon discovery of
* a new ISA bridge
*/
static void __devinit isa_bridge_find_late(struct pci_dev *pdev,
struct device_node *devnode)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
/* Store ISA device node and PCI device */
isa_bridge_devnode = of_node_get(devnode);
isa_bridge_pcidev = pdev;
/* Now parse the "ranges" property and setup the ISA mapping */
pci_process_ISA_OF_ranges(devnode, hose->io_base_phys);
/* Set the global ISA io base to indicate we have an ISA bridge */
isa_io_base = ISA_IO_BASE;
pr_debug("ISA bridge (late) is %s on %s\n",
devnode->full_name, pci_name(pdev));
}
/**
* isa_bridge_remove - Remove/unmap an ISA bridge
*/
static void isa_bridge_remove(void)
{
pr_debug("ISA bridge removed !\n");
/* Clear the global ISA io base to indicate that we have no more
* ISA bridge. Note that drivers don't quite handle that, though
* we should probably do something about it. But do we ever really
* have ISA bridges being removed on machines using legacy devices ?
*/
isa_io_base = ISA_IO_BASE;
/* Clear references to the bridge */
of_node_put(isa_bridge_devnode);
isa_bridge_devnode = NULL;
isa_bridge_pcidev = NULL;
/* Unmap the ISA area */
__iounmap_at((void *)ISA_IO_BASE, 0x10000);
}
/**
* isa_bridge_notify - Get notified of PCI devices addition/removal
*/
static int __devinit isa_bridge_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
struct pci_dev *pdev = to_pci_dev(dev);
struct device_node *devnode = pci_device_to_OF_node(pdev);
switch(action) {
case BUS_NOTIFY_ADD_DEVICE:
/* Check if we have an early ISA device, without PCI dev */
if (isa_bridge_devnode && isa_bridge_devnode == devnode &&
!isa_bridge_pcidev) {
pr_debug("ISA bridge PCI attached: %s\n",
pci_name(pdev));
isa_bridge_pcidev = pdev;
}
/* Check if we have no ISA device, and this happens to be one,
* register it as such if it has an OF device
*/
if (!isa_bridge_devnode && devnode && devnode->type &&
!strcmp(devnode->type, "isa"))
isa_bridge_find_late(pdev, devnode);
return 0;
case BUS_NOTIFY_DEL_DEVICE:
/* Check if this our existing ISA device */
if (pdev == isa_bridge_pcidev ||
(devnode && devnode == isa_bridge_devnode))
isa_bridge_remove();
return 0;
}
return 0;
}
static struct notifier_block isa_bridge_notifier = {
.notifier_call = isa_bridge_notify
};
/**
* isa_bridge_init - register to be notified of ISA bridge addition/removal
*
*/
static int __init isa_bridge_init(void)
{
if (firmware_has_feature(FW_FEATURE_ISERIES))
return 0;
bus_register_notifier(&pci_bus_type, &isa_bridge_notifier);
return 0;
}
arch_initcall(isa_bridge_init);
| gpl-2.0 |
atilag/fairphone2-kernel | arch/arm/mach-shmobile/pfc-r8a7740.c | 4820 | 86864 | /*
* R8A7740 processor support
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2011 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of the
* License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
#include <mach/r8a7740.h>
#define CPU_ALL_PORT(fn, pfx, sfx) \
PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
PORT_10(fn, pfx##10, sfx), PORT_90(fn, pfx##1, sfx), \
PORT_10(fn, pfx##20, sfx), \
PORT_1(fn, pfx##210, sfx), PORT_1(fn, pfx##211, sfx)
enum {
PINMUX_RESERVED = 0,
/* PORT0_DATA -> PORT211_DATA */
PINMUX_DATA_BEGIN,
PORT_ALL(DATA),
PINMUX_DATA_END,
/* PORT0_IN -> PORT211_IN */
PINMUX_INPUT_BEGIN,
PORT_ALL(IN),
PINMUX_INPUT_END,
/* PORT0_IN_PU -> PORT211_IN_PU */
PINMUX_INPUT_PULLUP_BEGIN,
PORT_ALL(IN_PU),
PINMUX_INPUT_PULLUP_END,
/* PORT0_IN_PD -> PORT211_IN_PD */
PINMUX_INPUT_PULLDOWN_BEGIN,
PORT_ALL(IN_PD),
PINMUX_INPUT_PULLDOWN_END,
/* PORT0_OUT -> PORT211_OUT */
PINMUX_OUTPUT_BEGIN,
PORT_ALL(OUT),
PINMUX_OUTPUT_END,
PINMUX_FUNCTION_BEGIN,
PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT211_FN_IN */
PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT211_FN_OUT */
PORT_ALL(FN0), /* PORT0_FN0 -> PORT211_FN0 */
PORT_ALL(FN1), /* PORT0_FN1 -> PORT211_FN1 */
PORT_ALL(FN2), /* PORT0_FN2 -> PORT211_FN2 */
PORT_ALL(FN3), /* PORT0_FN3 -> PORT211_FN3 */
PORT_ALL(FN4), /* PORT0_FN4 -> PORT211_FN4 */
PORT_ALL(FN5), /* PORT0_FN5 -> PORT211_FN5 */
PORT_ALL(FN6), /* PORT0_FN6 -> PORT211_FN6 */
PORT_ALL(FN7), /* PORT0_FN7 -> PORT211_FN7 */
MSEL1CR_31_0, MSEL1CR_31_1,
MSEL1CR_30_0, MSEL1CR_30_1,
MSEL1CR_29_0, MSEL1CR_29_1,
MSEL1CR_28_0, MSEL1CR_28_1,
MSEL1CR_27_0, MSEL1CR_27_1,
MSEL1CR_26_0, MSEL1CR_26_1,
MSEL1CR_16_0, MSEL1CR_16_1,
MSEL1CR_15_0, MSEL1CR_15_1,
MSEL1CR_14_0, MSEL1CR_14_1,
MSEL1CR_13_0, MSEL1CR_13_1,
MSEL1CR_12_0, MSEL1CR_12_1,
MSEL1CR_9_0, MSEL1CR_9_1,
MSEL1CR_7_0, MSEL1CR_7_1,
MSEL1CR_6_0, MSEL1CR_6_1,
MSEL1CR_5_0, MSEL1CR_5_1,
MSEL1CR_4_0, MSEL1CR_4_1,
MSEL1CR_3_0, MSEL1CR_3_1,
MSEL1CR_2_0, MSEL1CR_2_1,
MSEL1CR_0_0, MSEL1CR_0_1,
MSEL3CR_15_0, MSEL3CR_15_1, /* Trace / Debug ? */
MSEL3CR_6_0, MSEL3CR_6_1,
MSEL4CR_19_0, MSEL4CR_19_1,
MSEL4CR_18_0, MSEL4CR_18_1,
MSEL4CR_15_0, MSEL4CR_15_1,
MSEL4CR_10_0, MSEL4CR_10_1,
MSEL4CR_6_0, MSEL4CR_6_1,
MSEL4CR_4_0, MSEL4CR_4_1,
MSEL4CR_1_0, MSEL4CR_1_1,
MSEL5CR_31_0, MSEL5CR_31_1, /* irq/fiq output */
MSEL5CR_30_0, MSEL5CR_30_1,
MSEL5CR_29_0, MSEL5CR_29_1,
MSEL5CR_27_0, MSEL5CR_27_1,
MSEL5CR_25_0, MSEL5CR_25_1,
MSEL5CR_23_0, MSEL5CR_23_1,
MSEL5CR_21_0, MSEL5CR_21_1,
MSEL5CR_19_0, MSEL5CR_19_1,
MSEL5CR_17_0, MSEL5CR_17_1,
MSEL5CR_15_0, MSEL5CR_15_1,
MSEL5CR_14_0, MSEL5CR_14_1,
MSEL5CR_13_0, MSEL5CR_13_1,
MSEL5CR_12_0, MSEL5CR_12_1,
MSEL5CR_11_0, MSEL5CR_11_1,
MSEL5CR_10_0, MSEL5CR_10_1,
MSEL5CR_8_0, MSEL5CR_8_1,
MSEL5CR_7_0, MSEL5CR_7_1,
MSEL5CR_6_0, MSEL5CR_6_1,
MSEL5CR_5_0, MSEL5CR_5_1,
MSEL5CR_4_0, MSEL5CR_4_1,
MSEL5CR_3_0, MSEL5CR_3_1,
MSEL5CR_2_0, MSEL5CR_2_1,
MSEL5CR_0_0, MSEL5CR_0_1,
PINMUX_FUNCTION_END,
PINMUX_MARK_BEGIN,
/* IRQ */
IRQ0_PORT2_MARK, IRQ0_PORT13_MARK,
IRQ1_MARK,
IRQ2_PORT11_MARK, IRQ2_PORT12_MARK,
IRQ3_PORT10_MARK, IRQ3_PORT14_MARK,
IRQ4_PORT15_MARK, IRQ4_PORT172_MARK,
IRQ5_PORT0_MARK, IRQ5_PORT1_MARK,
IRQ6_PORT121_MARK, IRQ6_PORT173_MARK,
IRQ7_PORT120_MARK, IRQ7_PORT209_MARK,
IRQ8_MARK,
IRQ9_PORT118_MARK, IRQ9_PORT210_MARK,
IRQ10_MARK,
IRQ11_MARK,
IRQ12_PORT42_MARK, IRQ12_PORT97_MARK,
IRQ13_PORT64_MARK, IRQ13_PORT98_MARK,
IRQ14_PORT63_MARK, IRQ14_PORT99_MARK,
IRQ15_PORT62_MARK, IRQ15_PORT100_MARK,
IRQ16_PORT68_MARK, IRQ16_PORT211_MARK,
IRQ17_MARK,
IRQ18_MARK,
IRQ19_MARK,
IRQ20_MARK,
IRQ21_MARK,
IRQ22_MARK,
IRQ23_MARK,
IRQ24_MARK,
IRQ25_MARK,
IRQ26_PORT58_MARK, IRQ26_PORT81_MARK,
IRQ27_PORT57_MARK, IRQ27_PORT168_MARK,
IRQ28_PORT56_MARK, IRQ28_PORT169_MARK,
IRQ29_PORT50_MARK, IRQ29_PORT170_MARK,
IRQ30_PORT49_MARK, IRQ30_PORT171_MARK,
IRQ31_PORT41_MARK, IRQ31_PORT167_MARK,
/* Function */
/* DBGT */
DBGMDT2_MARK, DBGMDT1_MARK, DBGMDT0_MARK,
DBGMD10_MARK, DBGMD11_MARK, DBGMD20_MARK,
DBGMD21_MARK,
/* FSI */
FSIAISLD_PORT0_MARK, /* FSIAISLD Port 0/5 */
FSIAISLD_PORT5_MARK,
FSIASPDIF_PORT9_MARK, /* FSIASPDIF Port 9/18 */
FSIASPDIF_PORT18_MARK,
FSIAOSLD1_MARK, FSIAOSLD2_MARK, FSIAOLR_MARK,
FSIAOBT_MARK, FSIAOSLD_MARK, FSIAOMC_MARK,
FSIACK_MARK, FSIAILR_MARK, FSIAIBT_MARK,
/* FMSI */
FMSISLD_PORT1_MARK, /* FMSISLD Port 1/6 */
FMSISLD_PORT6_MARK,
FMSIILR_MARK, FMSIIBT_MARK, FMSIOLR_MARK, FMSIOBT_MARK,
FMSICK_MARK, FMSOILR_MARK, FMSOIBT_MARK, FMSOOLR_MARK,
FMSOOBT_MARK, FMSOSLD_MARK, FMSOCK_MARK,
/* SCIFA0 */
SCIFA0_SCK_MARK, SCIFA0_CTS_MARK, SCIFA0_RTS_MARK,
SCIFA0_RXD_MARK, SCIFA0_TXD_MARK,
/* SCIFA1 */
SCIFA1_CTS_MARK, SCIFA1_SCK_MARK, SCIFA1_RXD_MARK,
SCIFA1_TXD_MARK, SCIFA1_RTS_MARK,
/* SCIFA2 */
SCIFA2_SCK_PORT22_MARK, /* SCIFA2_SCK Port 22/199 */
SCIFA2_SCK_PORT199_MARK,
SCIFA2_RXD_MARK, SCIFA2_TXD_MARK,
SCIFA2_CTS_MARK, SCIFA2_RTS_MARK,
/* SCIFA3 */
SCIFA3_RTS_PORT105_MARK, /* MSEL5CR_8_0 */
SCIFA3_SCK_PORT116_MARK,
SCIFA3_CTS_PORT117_MARK,
SCIFA3_RXD_PORT174_MARK,
SCIFA3_TXD_PORT175_MARK,
SCIFA3_RTS_PORT161_MARK, /* MSEL5CR_8_1 */
SCIFA3_SCK_PORT158_MARK,
SCIFA3_CTS_PORT162_MARK,
SCIFA3_RXD_PORT159_MARK,
SCIFA3_TXD_PORT160_MARK,
/* SCIFA4 */
SCIFA4_RXD_PORT12_MARK, /* MSEL5CR[12:11] = 00 */
SCIFA4_TXD_PORT13_MARK,
SCIFA4_RXD_PORT204_MARK, /* MSEL5CR[12:11] = 01 */
SCIFA4_TXD_PORT203_MARK,
SCIFA4_RXD_PORT94_MARK, /* MSEL5CR[12:11] = 10 */
SCIFA4_TXD_PORT93_MARK,
SCIFA4_SCK_PORT21_MARK, /* SCIFA4_SCK Port 21/205 */
SCIFA4_SCK_PORT205_MARK,
/* SCIFA5 */
SCIFA5_TXD_PORT20_MARK, /* MSEL5CR[15:14] = 00 */
SCIFA5_RXD_PORT10_MARK,
SCIFA5_RXD_PORT207_MARK, /* MSEL5CR[15:14] = 01 */
SCIFA5_TXD_PORT208_MARK,
SCIFA5_TXD_PORT91_MARK, /* MSEL5CR[15:14] = 10 */
SCIFA5_RXD_PORT92_MARK,
SCIFA5_SCK_PORT23_MARK, /* SCIFA5_SCK Port 23/206 */
SCIFA5_SCK_PORT206_MARK,
/* SCIFA6 */
SCIFA6_SCK_MARK, SCIFA6_RXD_MARK, SCIFA6_TXD_MARK,
/* SCIFA7 */
SCIFA7_TXD_MARK, SCIFA7_RXD_MARK,
/* SCIFB */
SCIFB_SCK_PORT190_MARK, /* MSEL5CR_17_0 */
SCIFB_RXD_PORT191_MARK,
SCIFB_TXD_PORT192_MARK,
SCIFB_RTS_PORT186_MARK,
SCIFB_CTS_PORT187_MARK,
SCIFB_SCK_PORT2_MARK, /* MSEL5CR_17_1 */
SCIFB_RXD_PORT3_MARK,
SCIFB_TXD_PORT4_MARK,
SCIFB_RTS_PORT172_MARK,
SCIFB_CTS_PORT173_MARK,
/* LCD0 */
LCDC0_SELECT_MARK,
LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK,
LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK,
LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK,
LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK,
LCD0_D16_MARK, LCD0_D17_MARK,
LCD0_DON_MARK, LCD0_VCPWC_MARK, LCD0_VEPWC_MARK,
LCD0_DCK_MARK, LCD0_VSYN_MARK, /* for RGB */
LCD0_HSYN_MARK, LCD0_DISP_MARK, /* for RGB */
LCD0_WR_MARK, LCD0_RD_MARK, /* for SYS */
LCD0_CS_MARK, LCD0_RS_MARK, /* for SYS */
LCD0_D21_PORT158_MARK, LCD0_D23_PORT159_MARK, /* MSEL5CR_6_1 */
LCD0_D22_PORT160_MARK, LCD0_D20_PORT161_MARK,
LCD0_D19_PORT162_MARK, LCD0_D18_PORT163_MARK,
LCD0_LCLK_PORT165_MARK,
LCD0_D18_PORT40_MARK, LCD0_D22_PORT0_MARK, /* MSEL5CR_6_0 */
LCD0_D23_PORT1_MARK, LCD0_D21_PORT2_MARK,
LCD0_D20_PORT3_MARK, LCD0_D19_PORT4_MARK,
LCD0_LCLK_PORT102_MARK,
/* LCD1 */
LCDC1_SELECT_MARK,
LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK,
LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK,
LCD1_D8_MARK, LCD1_D9_MARK, LCD1_D10_MARK, LCD1_D11_MARK,
LCD1_D12_MARK, LCD1_D13_MARK, LCD1_D14_MARK, LCD1_D15_MARK,
LCD1_D16_MARK, LCD1_D17_MARK, LCD1_D18_MARK, LCD1_D19_MARK,
LCD1_D20_MARK, LCD1_D21_MARK, LCD1_D22_MARK, LCD1_D23_MARK,
LCD1_DON_MARK, LCD1_VCPWC_MARK,
LCD1_LCLK_MARK, LCD1_VEPWC_MARK,
LCD1_DCK_MARK, LCD1_VSYN_MARK, /* for RGB */
LCD1_HSYN_MARK, LCD1_DISP_MARK, /* for RGB */
LCD1_RS_MARK, LCD1_CS_MARK, /* for SYS */
LCD1_RD_MARK, LCD1_WR_MARK, /* for SYS */
/* RSPI */
RSPI_SSL0_A_MARK, RSPI_SSL1_A_MARK, RSPI_SSL2_A_MARK,
RSPI_SSL3_A_MARK, RSPI_CK_A_MARK, RSPI_MOSI_A_MARK,
RSPI_MISO_A_MARK,
/* VIO CKO */
VIO_CKO1_MARK, /* needs fixup */
VIO_CKO2_MARK,
VIO_CKO_1_MARK,
VIO_CKO_MARK,
/* VIO0 */
VIO0_D0_MARK, VIO0_D1_MARK, VIO0_D2_MARK, VIO0_D3_MARK,
VIO0_D4_MARK, VIO0_D5_MARK, VIO0_D6_MARK, VIO0_D7_MARK,
VIO0_D8_MARK, VIO0_D9_MARK, VIO0_D10_MARK, VIO0_D11_MARK,
VIO0_D12_MARK, VIO0_VD_MARK, VIO0_HD_MARK, VIO0_CLK_MARK,
VIO0_FIELD_MARK,
VIO0_D13_PORT26_MARK, /* MSEL5CR_27_0 */
VIO0_D14_PORT25_MARK,
VIO0_D15_PORT24_MARK,
VIO0_D13_PORT22_MARK, /* MSEL5CR_27_1 */
VIO0_D14_PORT95_MARK,
VIO0_D15_PORT96_MARK,
/* VIO1 */
VIO1_D0_MARK, VIO1_D1_MARK, VIO1_D2_MARK, VIO1_D3_MARK,
VIO1_D4_MARK, VIO1_D5_MARK, VIO1_D6_MARK, VIO1_D7_MARK,
VIO1_VD_MARK, VIO1_HD_MARK, VIO1_CLK_MARK, VIO1_FIELD_MARK,
/* TPU0 */
TPU0TO0_MARK, TPU0TO1_MARK, TPU0TO3_MARK,
TPU0TO2_PORT66_MARK, /* TPU0TO2 Port 66/202 */
TPU0TO2_PORT202_MARK,
/* SSP1 0 */
STP0_IPD0_MARK, STP0_IPD1_MARK, STP0_IPD2_MARK, STP0_IPD3_MARK,
STP0_IPD4_MARK, STP0_IPD5_MARK, STP0_IPD6_MARK, STP0_IPD7_MARK,
STP0_IPEN_MARK, STP0_IPCLK_MARK, STP0_IPSYNC_MARK,
/* SSP1 1 */
STP1_IPD1_MARK, STP1_IPD2_MARK, STP1_IPD3_MARK, STP1_IPD4_MARK,
STP1_IPD5_MARK, STP1_IPD6_MARK, STP1_IPD7_MARK, STP1_IPCLK_MARK,
STP1_IPSYNC_MARK,
STP1_IPD0_PORT186_MARK, /* MSEL5CR_23_0 */
STP1_IPEN_PORT187_MARK,
STP1_IPD0_PORT194_MARK, /* MSEL5CR_23_1 */
STP1_IPEN_PORT193_MARK,
/* SIM */
SIM_RST_MARK, SIM_CLK_MARK,
SIM_D_PORT22_MARK, /* SIM_D Port 22/199 */
SIM_D_PORT199_MARK,
/* SDHI0 */
SDHI0_D0_MARK, SDHI0_D1_MARK, SDHI0_D2_MARK, SDHI0_D3_MARK,
SDHI0_CD_MARK, SDHI0_WP_MARK, SDHI0_CMD_MARK, SDHI0_CLK_MARK,
/* SDHI1 */
SDHI1_D0_MARK, SDHI1_D1_MARK, SDHI1_D2_MARK, SDHI1_D3_MARK,
SDHI1_CD_MARK, SDHI1_WP_MARK, SDHI1_CMD_MARK, SDHI1_CLK_MARK,
/* SDHI2 */
SDHI2_D0_MARK, SDHI2_D1_MARK, SDHI2_D2_MARK, SDHI2_D3_MARK,
SDHI2_CLK_MARK, SDHI2_CMD_MARK,
SDHI2_CD_PORT24_MARK, /* MSEL5CR_19_0 */
SDHI2_WP_PORT25_MARK,
SDHI2_WP_PORT177_MARK, /* MSEL5CR_19_1 */
SDHI2_CD_PORT202_MARK,
/* MSIOF2 */
MSIOF2_TXD_MARK, MSIOF2_RXD_MARK, MSIOF2_TSCK_MARK,
MSIOF2_SS2_MARK, MSIOF2_TSYNC_MARK, MSIOF2_SS1_MARK,
MSIOF2_MCK1_MARK, MSIOF2_MCK0_MARK, MSIOF2_RSYNC_MARK,
MSIOF2_RSCK_MARK,
/* KEYSC */
KEYIN4_MARK, KEYIN5_MARK, KEYIN6_MARK, KEYIN7_MARK,
KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK,
KEYOUT4_MARK, KEYOUT5_MARK, KEYOUT6_MARK, KEYOUT7_MARK,
KEYIN0_PORT43_MARK, /* MSEL4CR_18_0 */
KEYIN1_PORT44_MARK,
KEYIN2_PORT45_MARK,
KEYIN3_PORT46_MARK,
KEYIN0_PORT58_MARK, /* MSEL4CR_18_1 */
KEYIN1_PORT57_MARK,
KEYIN2_PORT56_MARK,
KEYIN3_PORT55_MARK,
/* VOU */
DV_D0_MARK, DV_D1_MARK, DV_D2_MARK, DV_D3_MARK,
DV_D4_MARK, DV_D5_MARK, DV_D6_MARK, DV_D7_MARK,
DV_D8_MARK, DV_D9_MARK, DV_D10_MARK, DV_D11_MARK,
DV_D12_MARK, DV_D13_MARK, DV_D14_MARK, DV_D15_MARK,
DV_CLK_MARK, DV_VSYNC_MARK, DV_HSYNC_MARK,
/* MEMC */
MEMC_AD0_MARK, MEMC_AD1_MARK, MEMC_AD2_MARK, MEMC_AD3_MARK,
MEMC_AD4_MARK, MEMC_AD5_MARK, MEMC_AD6_MARK, MEMC_AD7_MARK,
MEMC_AD8_MARK, MEMC_AD9_MARK, MEMC_AD10_MARK, MEMC_AD11_MARK,
MEMC_AD12_MARK, MEMC_AD13_MARK, MEMC_AD14_MARK, MEMC_AD15_MARK,
MEMC_CS0_MARK, MEMC_INT_MARK, MEMC_NWE_MARK, MEMC_NOE_MARK,
MEMC_CS1_MARK, /* MSEL4CR_6_0 */
MEMC_ADV_MARK,
MEMC_WAIT_MARK,
MEMC_BUSCLK_MARK,
MEMC_A1_MARK, /* MSEL4CR_6_1 */
MEMC_DREQ0_MARK,
MEMC_DREQ1_MARK,
MEMC_A0_MARK,
/* MMC */
MMC0_D0_PORT68_MARK, MMC0_D1_PORT69_MARK, MMC0_D2_PORT70_MARK,
MMC0_D3_PORT71_MARK, MMC0_D4_PORT72_MARK, MMC0_D5_PORT73_MARK,
MMC0_D6_PORT74_MARK, MMC0_D7_PORT75_MARK, MMC0_CLK_PORT66_MARK,
MMC0_CMD_PORT67_MARK, /* MSEL4CR_15_0 */
MMC1_D0_PORT149_MARK, MMC1_D1_PORT148_MARK, MMC1_D2_PORT147_MARK,
MMC1_D3_PORT146_MARK, MMC1_D4_PORT145_MARK, MMC1_D5_PORT144_MARK,
MMC1_D6_PORT143_MARK, MMC1_D7_PORT142_MARK, MMC1_CLK_PORT103_MARK,
MMC1_CMD_PORT104_MARK, /* MSEL4CR_15_1 */
/* MSIOF0 */
MSIOF0_SS1_MARK, MSIOF0_SS2_MARK, MSIOF0_RXD_MARK,
MSIOF0_TXD_MARK, MSIOF0_MCK0_MARK, MSIOF0_MCK1_MARK,
MSIOF0_RSYNC_MARK, MSIOF0_RSCK_MARK, MSIOF0_TSCK_MARK,
MSIOF0_TSYNC_MARK,
/* MSIOF1 */
MSIOF1_RSCK_MARK, MSIOF1_RSYNC_MARK,
MSIOF1_MCK0_MARK, MSIOF1_MCK1_MARK,
MSIOF1_SS2_PORT116_MARK, MSIOF1_SS1_PORT117_MARK,
MSIOF1_RXD_PORT118_MARK, MSIOF1_TXD_PORT119_MARK,
MSIOF1_TSYNC_PORT120_MARK,
MSIOF1_TSCK_PORT121_MARK, /* MSEL4CR_10_0 */
MSIOF1_SS1_PORT67_MARK, MSIOF1_TSCK_PORT72_MARK,
MSIOF1_TSYNC_PORT73_MARK, MSIOF1_TXD_PORT74_MARK,
MSIOF1_RXD_PORT75_MARK,
MSIOF1_SS2_PORT202_MARK, /* MSEL4CR_10_1 */
/* GPIO */
GPO0_MARK, GPI0_MARK, GPO1_MARK, GPI1_MARK,
/* USB0 */
USB0_OCI_MARK, USB0_PPON_MARK, VBUS_MARK,
/* USB1 */
USB1_OCI_MARK, USB1_PPON_MARK,
/* BBIF1 */
BBIF1_RXD_MARK, BBIF1_TXD_MARK, BBIF1_TSYNC_MARK,
BBIF1_TSCK_MARK, BBIF1_RSCK_MARK, BBIF1_RSYNC_MARK,
BBIF1_FLOW_MARK, BBIF1_RX_FLOW_N_MARK,
/* BBIF2 */
BBIF2_TXD2_PORT5_MARK, /* MSEL5CR_0_0 */
BBIF2_RXD2_PORT60_MARK,
BBIF2_TSYNC2_PORT6_MARK,
BBIF2_TSCK2_PORT59_MARK,
BBIF2_RXD2_PORT90_MARK, /* MSEL5CR_0_1 */
BBIF2_TXD2_PORT183_MARK,
BBIF2_TSCK2_PORT89_MARK,
BBIF2_TSYNC2_PORT184_MARK,
/* BSC / FLCTL / PCMCIA */
CS0_MARK, CS2_MARK, CS4_MARK,
CS5B_MARK, CS6A_MARK,
CS5A_PORT105_MARK, /* CS5A PORT 19/105 */
CS5A_PORT19_MARK,
IOIS16_MARK, /* ? */
A0_MARK, A1_MARK, A2_MARK, A3_MARK,
A4_FOE_MARK, /* share with FLCTL */
A5_FCDE_MARK, /* share with FLCTL */
A6_MARK, A7_MARK, A8_MARK, A9_MARK,
A10_MARK, A11_MARK, A12_MARK, A13_MARK,
A14_MARK, A15_MARK, A16_MARK, A17_MARK,
A18_MARK, A19_MARK, A20_MARK, A21_MARK,
A22_MARK, A23_MARK, A24_MARK, A25_MARK,
A26_MARK,
D0_NAF0_MARK, D1_NAF1_MARK, D2_NAF2_MARK, /* share with FLCTL */
D3_NAF3_MARK, D4_NAF4_MARK, D5_NAF5_MARK, /* share with FLCTL */
D6_NAF6_MARK, D7_NAF7_MARK, D8_NAF8_MARK, /* share with FLCTL */
D9_NAF9_MARK, D10_NAF10_MARK, D11_NAF11_MARK, /* share with FLCTL */
D12_NAF12_MARK, D13_NAF13_MARK, D14_NAF14_MARK, /* share with FLCTL */
D15_NAF15_MARK, /* share with FLCTL */
D16_MARK, D17_MARK, D18_MARK, D19_MARK,
D20_MARK, D21_MARK, D22_MARK, D23_MARK,
D24_MARK, D25_MARK, D26_MARK, D27_MARK,
D28_MARK, D29_MARK, D30_MARK, D31_MARK,
WE0_FWE_MARK, /* share with FLCTL */
WE1_MARK,
WE2_ICIORD_MARK, /* share with PCMCIA */
WE3_ICIOWR_MARK, /* share with PCMCIA */
CKO_MARK, BS_MARK, RDWR_MARK,
RD_FSC_MARK, /* share with FLCTL */
WAIT_PORT177_MARK, /* WAIT Port 90/177 */
WAIT_PORT90_MARK,
FCE0_MARK, FCE1_MARK, FRB_MARK, /* FLCTL */
/* IRDA */
IRDA_FIRSEL_MARK, IRDA_IN_MARK, IRDA_OUT_MARK,
/* ATAPI */
IDE_D0_MARK, IDE_D1_MARK, IDE_D2_MARK, IDE_D3_MARK,
IDE_D4_MARK, IDE_D5_MARK, IDE_D6_MARK, IDE_D7_MARK,
IDE_D8_MARK, IDE_D9_MARK, IDE_D10_MARK, IDE_D11_MARK,
IDE_D12_MARK, IDE_D13_MARK, IDE_D14_MARK, IDE_D15_MARK,
IDE_A0_MARK, IDE_A1_MARK, IDE_A2_MARK, IDE_CS0_MARK,
IDE_CS1_MARK, IDE_IOWR_MARK, IDE_IORD_MARK, IDE_IORDY_MARK,
IDE_INT_MARK, IDE_RST_MARK, IDE_DIRECTION_MARK,
IDE_EXBUF_ENB_MARK, IDE_IODACK_MARK, IDE_IODREQ_MARK,
/* RMII */
RMII_CRS_DV_MARK, RMII_RX_ER_MARK, RMII_RXD0_MARK,
RMII_RXD1_MARK, RMII_TX_EN_MARK, RMII_TXD0_MARK,
RMII_MDC_MARK, RMII_TXD1_MARK, RMII_MDIO_MARK,
RMII_REF50CK_MARK, /* for RMII */
RMII_REF125CK_MARK, /* for GMII */
/* GEther */
ET_TX_CLK_MARK, ET_TX_EN_MARK, ET_ETXD0_MARK, ET_ETXD1_MARK,
ET_ETXD2_MARK, ET_ETXD3_MARK,
ET_ETXD4_MARK, ET_ETXD5_MARK, /* for GEther */
ET_ETXD6_MARK, ET_ETXD7_MARK, /* for GEther */
ET_COL_MARK, ET_TX_ER_MARK, ET_RX_CLK_MARK, ET_RX_DV_MARK,
ET_ERXD0_MARK, ET_ERXD1_MARK, ET_ERXD2_MARK, ET_ERXD3_MARK,
ET_ERXD4_MARK, ET_ERXD5_MARK, /* for GEther */
ET_ERXD6_MARK, ET_ERXD7_MARK, /* for GEther */
ET_RX_ER_MARK, ET_CRS_MARK, ET_MDC_MARK, ET_MDIO_MARK,
ET_LINK_MARK, ET_PHY_INT_MARK, ET_WOL_MARK, ET_GTX_CLK_MARK,
/* DMA0 */
DREQ0_MARK, DACK0_MARK,
/* DMA1 */
DREQ1_MARK, DACK1_MARK,
/* SYSC */
RESETOUTS_MARK, RESETP_PULLUP_MARK, RESETP_PLAIN_MARK,
/* IRREM */
IROUT_MARK,
/* SDENC */
SDENC_CPG_MARK, SDENC_DV_CLKI_MARK,
/* DEBUG */
EDEBGREQ_PULLUP_MARK, /* for JTAG */
EDEBGREQ_PULLDOWN_MARK,
TRACEAUD_FROM_VIO_MARK, /* for TRACE/AUD */
TRACEAUD_FROM_LCDC0_MARK,
TRACEAUD_FROM_MEMC_MARK,
PINMUX_MARK_END,
};
static pinmux_enum_t pinmux_data[] = {
/* specify valid pin states for each pin in GPIO mode */
/* I/O and Pull U/D */
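/*
 * The PORT_DATA_IO*() variants record, per port, that plain input/output
 * is valid and whether an internal pull-up (_PU), pull-down (_PD), or
 * both (_PU_PD) can be enabled in GPIO mode.
 */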
PORT_DATA_IO_PD(0), PORT_DATA_IO_PD(1),
PORT_DATA_IO_PD(2), PORT_DATA_IO_PD(3),
PORT_DATA_IO_PD(4), PORT_DATA_IO_PD(5),
PORT_DATA_IO_PD(6), PORT_DATA_IO(7),
PORT_DATA_IO(8), PORT_DATA_IO(9),
PORT_DATA_IO_PD(10), PORT_DATA_IO_PD(11),
PORT_DATA_IO_PD(12), PORT_DATA_IO_PU_PD(13),
PORT_DATA_IO_PD(14), PORT_DATA_IO_PD(15),
PORT_DATA_IO_PD(16), PORT_DATA_IO_PD(17),
PORT_DATA_IO(18), PORT_DATA_IO_PU(19),
PORT_DATA_IO_PU_PD(20), PORT_DATA_IO_PD(21),
PORT_DATA_IO_PU_PD(22), PORT_DATA_IO(23),
PORT_DATA_IO_PU(24), PORT_DATA_IO_PU(25),
PORT_DATA_IO_PU(26), PORT_DATA_IO_PU(27),
PORT_DATA_IO_PU(28), PORT_DATA_IO_PU(29),
PORT_DATA_IO_PU(30), PORT_DATA_IO_PD(31),
PORT_DATA_IO_PD(32), PORT_DATA_IO_PD(33),
PORT_DATA_IO_PD(34), PORT_DATA_IO_PU(35),
PORT_DATA_IO_PU(36), PORT_DATA_IO_PD(37),
PORT_DATA_IO_PU(38), PORT_DATA_IO_PD(39),
PORT_DATA_IO_PU_PD(40), PORT_DATA_IO_PD(41),
PORT_DATA_IO_PD(42), PORT_DATA_IO_PU_PD(43),
PORT_DATA_IO_PU_PD(44), PORT_DATA_IO_PU_PD(45),
PORT_DATA_IO_PU_PD(46), PORT_DATA_IO_PU_PD(47),
PORT_DATA_IO_PU_PD(48), PORT_DATA_IO_PU_PD(49),
PORT_DATA_IO_PU_PD(50), PORT_DATA_IO_PD(51),
PORT_DATA_IO_PD(52), PORT_DATA_IO_PD(53),
PORT_DATA_IO_PD(54), PORT_DATA_IO_PU_PD(55),
PORT_DATA_IO_PU_PD(56), PORT_DATA_IO_PU_PD(57),
PORT_DATA_IO_PU_PD(58), PORT_DATA_IO_PU_PD(59),
PORT_DATA_IO_PU_PD(60), PORT_DATA_IO_PD(61),
PORT_DATA_IO_PD(62), PORT_DATA_IO_PD(63),
PORT_DATA_IO_PD(64), PORT_DATA_IO_PD(65),
PORT_DATA_IO_PU_PD(66), PORT_DATA_IO_PU_PD(67),
PORT_DATA_IO_PU_PD(68), PORT_DATA_IO_PU_PD(69),
PORT_DATA_IO_PU_PD(70), PORT_DATA_IO_PU_PD(71),
PORT_DATA_IO_PU_PD(72), PORT_DATA_IO_PU_PD(73),
PORT_DATA_IO_PU_PD(74), PORT_DATA_IO_PU_PD(75),
PORT_DATA_IO_PU_PD(76), PORT_DATA_IO_PU_PD(77),
PORT_DATA_IO_PU_PD(78), PORT_DATA_IO_PU_PD(79),
PORT_DATA_IO_PU_PD(80), PORT_DATA_IO_PU_PD(81),
PORT_DATA_IO(82), PORT_DATA_IO_PU_PD(83),
PORT_DATA_IO(84), PORT_DATA_IO_PD(85),
PORT_DATA_IO_PD(86), PORT_DATA_IO_PD(87),
PORT_DATA_IO_PD(88), PORT_DATA_IO_PD(89),
PORT_DATA_IO_PD(90), PORT_DATA_IO_PU_PD(91),
PORT_DATA_IO_PU_PD(92), PORT_DATA_IO_PU_PD(93),
PORT_DATA_IO_PU_PD(94), PORT_DATA_IO_PU_PD(95),
PORT_DATA_IO_PU_PD(96), PORT_DATA_IO_PU_PD(97),
PORT_DATA_IO_PU_PD(98), PORT_DATA_IO_PU_PD(99),
PORT_DATA_IO_PU_PD(100), PORT_DATA_IO(101),
PORT_DATA_IO_PU(102), PORT_DATA_IO_PU_PD(103),
PORT_DATA_IO_PU(104), PORT_DATA_IO_PU(105),
PORT_DATA_IO_PU_PD(106), PORT_DATA_IO(107),
PORT_DATA_IO(108), PORT_DATA_IO(109),
PORT_DATA_IO(110), PORT_DATA_IO(111),
PORT_DATA_IO(112), PORT_DATA_IO(113),
PORT_DATA_IO_PU_PD(114), PORT_DATA_IO(115),
PORT_DATA_IO_PD(116), PORT_DATA_IO_PD(117),
PORT_DATA_IO_PD(118), PORT_DATA_IO_PD(119),
PORT_DATA_IO_PD(120), PORT_DATA_IO_PD(121),
PORT_DATA_IO_PD(122), PORT_DATA_IO_PD(123),
PORT_DATA_IO_PD(124), PORT_DATA_IO(125),
PORT_DATA_IO(126), PORT_DATA_IO(127),
PORT_DATA_IO(128), PORT_DATA_IO(129),
PORT_DATA_IO(130), PORT_DATA_IO(131),
PORT_DATA_IO(132), PORT_DATA_IO(133),
PORT_DATA_IO(134), PORT_DATA_IO(135),
PORT_DATA_IO(136), PORT_DATA_IO(137),
PORT_DATA_IO(138), PORT_DATA_IO(139),
PORT_DATA_IO(140), PORT_DATA_IO(141),
PORT_DATA_IO_PU(142), PORT_DATA_IO_PU(143),
PORT_DATA_IO_PU(144), PORT_DATA_IO_PU(145),
PORT_DATA_IO_PU(146), PORT_DATA_IO_PU(147),
PORT_DATA_IO_PU(148), PORT_DATA_IO_PU(149),
PORT_DATA_IO_PU(150), PORT_DATA_IO_PU(151),
PORT_DATA_IO_PU(152), PORT_DATA_IO_PU(153),
PORT_DATA_IO_PU(154), PORT_DATA_IO_PU(155),
PORT_DATA_IO_PU(156), PORT_DATA_IO_PU(157),
PORT_DATA_IO_PD(158), PORT_DATA_IO_PD(159),
PORT_DATA_IO_PU_PD(160), PORT_DATA_IO_PD(161),
PORT_DATA_IO_PD(162), PORT_DATA_IO_PD(163),
PORT_DATA_IO_PD(164), PORT_DATA_IO_PD(165),
PORT_DATA_IO_PU(166), PORT_DATA_IO_PU(167),
PORT_DATA_IO_PU(168), PORT_DATA_IO_PU(169),
PORT_DATA_IO_PU(170), PORT_DATA_IO_PU(171),
PORT_DATA_IO_PD(172), PORT_DATA_IO_PD(173),
PORT_DATA_IO_PD(174), PORT_DATA_IO_PD(175),
PORT_DATA_IO_PU(176), PORT_DATA_IO_PU_PD(177),
PORT_DATA_IO_PU(178), PORT_DATA_IO_PD(179),
PORT_DATA_IO_PD(180), PORT_DATA_IO_PU(181),
PORT_DATA_IO_PU(182), PORT_DATA_IO(183),
PORT_DATA_IO_PD(184), PORT_DATA_IO_PD(185),
PORT_DATA_IO_PD(186), PORT_DATA_IO_PD(187),
PORT_DATA_IO_PD(188), PORT_DATA_IO_PD(189),
PORT_DATA_IO_PD(190), PORT_DATA_IO_PD(191),
PORT_DATA_IO_PD(192), PORT_DATA_IO_PU_PD(193),
PORT_DATA_IO_PU_PD(194), PORT_DATA_IO_PD(195),
PORT_DATA_IO_PU_PD(196), PORT_DATA_IO_PD(197),
PORT_DATA_IO_PU_PD(198), PORT_DATA_IO_PU_PD(199),
PORT_DATA_IO_PU_PD(200), PORT_DATA_IO_PU(201),
PORT_DATA_IO_PU_PD(202), PORT_DATA_IO(203),
PORT_DATA_IO_PU_PD(204), PORT_DATA_IO_PU_PD(205),
PORT_DATA_IO_PU_PD(206), PORT_DATA_IO_PU_PD(207),
PORT_DATA_IO_PU_PD(208), PORT_DATA_IO_PD(209),
PORT_DATA_IO_PD(210), PORT_DATA_IO_PD(211),
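/*
 * Each PINMUX_DATA(<mark>, ...) entry below ties a function mark to the
 * configuration that selects it: the port's function-select value
 * (PORTn_FN0..FN7) plus, where a function is shared between several ports,
 * the MSELxCR mode-select bit(s) that route it to this particular port.
 */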
/* Port0 */
PINMUX_DATA(DBGMDT2_MARK, PORT0_FN1),
PINMUX_DATA(FSIAISLD_PORT0_MARK, PORT0_FN2, MSEL5CR_3_0),
PINMUX_DATA(FSIAOSLD1_MARK, PORT0_FN3),
PINMUX_DATA(LCD0_D22_PORT0_MARK, PORT0_FN4, MSEL5CR_6_0),
PINMUX_DATA(SCIFA7_RXD_MARK, PORT0_FN6),
PINMUX_DATA(LCD1_D4_MARK, PORT0_FN7),
PINMUX_DATA(IRQ5_PORT0_MARK, PORT0_FN0, MSEL1CR_5_0),
/* Port1 */
PINMUX_DATA(DBGMDT1_MARK, PORT1_FN1),
PINMUX_DATA(FMSISLD_PORT1_MARK, PORT1_FN2, MSEL5CR_5_0),
PINMUX_DATA(FSIAOSLD2_MARK, PORT1_FN3),
PINMUX_DATA(LCD0_D23_PORT1_MARK, PORT1_FN4, MSEL5CR_6_0),
PINMUX_DATA(SCIFA7_TXD_MARK, PORT1_FN6),
PINMUX_DATA(LCD1_D3_MARK, PORT1_FN7),
PINMUX_DATA(IRQ5_PORT1_MARK, PORT1_FN0, MSEL1CR_5_1),
/* Port2 */
PINMUX_DATA(DBGMDT0_MARK, PORT2_FN1),
PINMUX_DATA(SCIFB_SCK_PORT2_MARK, PORT2_FN2, MSEL5CR_17_1),
PINMUX_DATA(LCD0_D21_PORT2_MARK, PORT2_FN4, MSEL5CR_6_0),
PINMUX_DATA(LCD1_D2_MARK, PORT2_FN7),
PINMUX_DATA(IRQ0_PORT2_MARK, PORT2_FN0, MSEL1CR_0_1),
/* Port3 */
PINMUX_DATA(DBGMD21_MARK, PORT3_FN1),
PINMUX_DATA(SCIFB_RXD_PORT3_MARK, PORT3_FN2, MSEL5CR_17_1),
PINMUX_DATA(LCD0_D20_PORT3_MARK, PORT3_FN4, MSEL5CR_6_0),
PINMUX_DATA(LCD1_D1_MARK, PORT3_FN7),
/* Port4 */
PINMUX_DATA(DBGMD20_MARK, PORT4_FN1),
PINMUX_DATA(SCIFB_TXD_PORT4_MARK, PORT4_FN2, MSEL5CR_17_1),
PINMUX_DATA(LCD0_D19_PORT4_MARK, PORT4_FN4, MSEL5CR_6_0),
PINMUX_DATA(LCD1_D0_MARK, PORT4_FN7),
/* Port5 */
PINMUX_DATA(DBGMD11_MARK, PORT5_FN1),
PINMUX_DATA(BBIF2_TXD2_PORT5_MARK, PORT5_FN2, MSEL5CR_0_0),
PINMUX_DATA(FSIAISLD_PORT5_MARK, PORT5_FN4, MSEL5CR_3_1),
PINMUX_DATA(RSPI_SSL0_A_MARK, PORT5_FN6),
PINMUX_DATA(LCD1_VCPWC_MARK, PORT5_FN7),
/* Port6 */
PINMUX_DATA(DBGMD10_MARK, PORT6_FN1),
PINMUX_DATA(BBIF2_TSYNC2_PORT6_MARK, PORT6_FN2, MSEL5CR_0_0),
PINMUX_DATA(FMSISLD_PORT6_MARK, PORT6_FN4, MSEL5CR_5_1),
PINMUX_DATA(RSPI_SSL1_A_MARK, PORT6_FN6),
PINMUX_DATA(LCD1_VEPWC_MARK, PORT6_FN7),
/* Port7 */
PINMUX_DATA(FSIAOLR_MARK, PORT7_FN1),
/* Port8 */
PINMUX_DATA(FSIAOBT_MARK, PORT8_FN1),
/* Port9 */
PINMUX_DATA(FSIAOSLD_MARK, PORT9_FN1),
PINMUX_DATA(FSIASPDIF_PORT9_MARK, PORT9_FN2, MSEL5CR_4_0),
/* Port10 */
PINMUX_DATA(FSIAOMC_MARK, PORT10_FN1),
PINMUX_DATA(SCIFA5_RXD_PORT10_MARK, PORT10_FN3, MSEL5CR_14_0, MSEL5CR_15_0),
PINMUX_DATA(IRQ3_PORT10_MARK, PORT10_FN0, MSEL1CR_3_0),
/* Port11 */
PINMUX_DATA(FSIACK_MARK, PORT11_FN1),
PINMUX_DATA(IRQ2_PORT11_MARK, PORT11_FN0, MSEL1CR_2_0),
/* Port12 */
PINMUX_DATA(FSIAILR_MARK, PORT12_FN1),
PINMUX_DATA(SCIFA4_RXD_PORT12_MARK, PORT12_FN2, MSEL5CR_12_0, MSEL5CR_11_0),
PINMUX_DATA(LCD1_RS_MARK, PORT12_FN6),
PINMUX_DATA(LCD1_DISP_MARK, PORT12_FN7),
PINMUX_DATA(IRQ2_PORT12_MARK, PORT12_FN0, MSEL1CR_2_1),
/* Port13 */
PINMUX_DATA(FSIAIBT_MARK, PORT13_FN1),
PINMUX_DATA(SCIFA4_TXD_PORT13_MARK, PORT13_FN2, MSEL5CR_12_0, MSEL5CR_11_0),
PINMUX_DATA(LCD1_RD_MARK, PORT13_FN7),
PINMUX_DATA(IRQ0_PORT13_MARK, PORT13_FN0, MSEL1CR_0_0),
/* Port14 */
PINMUX_DATA(FMSOILR_MARK, PORT14_FN1),
PINMUX_DATA(FMSIILR_MARK, PORT14_FN2),
PINMUX_DATA(VIO_CKO1_MARK, PORT14_FN3),
PINMUX_DATA(LCD1_D23_MARK, PORT14_FN7),
PINMUX_DATA(IRQ3_PORT14_MARK, PORT14_FN0, MSEL1CR_3_1),
/* Port15 */
PINMUX_DATA(FMSOIBT_MARK, PORT15_FN1),
PINMUX_DATA(FMSIIBT_MARK, PORT15_FN2),
PINMUX_DATA(VIO_CKO2_MARK, PORT15_FN3),
PINMUX_DATA(LCD1_D22_MARK, PORT15_FN7),
PINMUX_DATA(IRQ4_PORT15_MARK, PORT15_FN0, MSEL1CR_4_0),
/* Port16 */
PINMUX_DATA(FMSOOLR_MARK, PORT16_FN1),
PINMUX_DATA(FMSIOLR_MARK, PORT16_FN2),
/* Port17 */
PINMUX_DATA(FMSOOBT_MARK, PORT17_FN1),
PINMUX_DATA(FMSIOBT_MARK, PORT17_FN2),
/* Port18 */
PINMUX_DATA(FMSOSLD_MARK, PORT18_FN1),
PINMUX_DATA(FSIASPDIF_PORT18_MARK, PORT18_FN2, MSEL5CR_4_1),
/* Port19 */
PINMUX_DATA(FMSICK_MARK, PORT19_FN1),
PINMUX_DATA(CS5A_PORT19_MARK, PORT19_FN7, MSEL5CR_2_1),
PINMUX_DATA(IRQ10_MARK, PORT19_FN0),
/* Port20 */
PINMUX_DATA(FMSOCK_MARK, PORT20_FN1),
PINMUX_DATA(SCIFA5_TXD_PORT20_MARK, PORT20_FN3, MSEL5CR_15_0, MSEL5CR_14_0),
PINMUX_DATA(IRQ1_MARK, PORT20_FN0),
/* Port21 */
PINMUX_DATA(SCIFA1_CTS_MARK, PORT21_FN1),
PINMUX_DATA(SCIFA4_SCK_PORT21_MARK, PORT21_FN2, MSEL5CR_10_0),
PINMUX_DATA(TPU0TO1_MARK, PORT21_FN4),
PINMUX_DATA(VIO1_FIELD_MARK, PORT21_FN5),
PINMUX_DATA(STP0_IPD5_MARK, PORT21_FN6),
PINMUX_DATA(LCD1_D10_MARK, PORT21_FN7),
/* Port22 */
PINMUX_DATA(SCIFA2_SCK_PORT22_MARK, PORT22_FN1, MSEL5CR_7_0),
PINMUX_DATA(SIM_D_PORT22_MARK, PORT22_FN4, MSEL5CR_21_0),
PINMUX_DATA(VIO0_D13_PORT22_MARK, PORT22_FN7, MSEL5CR_27_1),
/* Port23 */
PINMUX_DATA(SCIFA1_RTS_MARK, PORT23_FN1),
PINMUX_DATA(SCIFA5_SCK_PORT23_MARK, PORT23_FN3, MSEL5CR_13_0),
PINMUX_DATA(TPU0TO0_MARK, PORT23_FN4),
PINMUX_DATA(VIO_CKO_1_MARK, PORT23_FN5),
PINMUX_DATA(STP0_IPD2_MARK, PORT23_FN6),
PINMUX_DATA(LCD1_D7_MARK, PORT23_FN7),
/* Port24 */
PINMUX_DATA(VIO0_D15_PORT24_MARK, PORT24_FN1, MSEL5CR_27_0),
PINMUX_DATA(VIO1_D7_MARK, PORT24_FN5),
PINMUX_DATA(SCIFA6_SCK_MARK, PORT24_FN6),
PINMUX_DATA(SDHI2_CD_PORT24_MARK, PORT24_FN7, MSEL5CR_19_0),
/* Port25 */
PINMUX_DATA(VIO0_D14_PORT25_MARK, PORT25_FN1, MSEL5CR_27_0),
PINMUX_DATA(VIO1_D6_MARK, PORT25_FN5),
PINMUX_DATA(SCIFA6_RXD_MARK, PORT25_FN6),
PINMUX_DATA(SDHI2_WP_PORT25_MARK, PORT25_FN7, MSEL5CR_19_0),
/* Port26 */
PINMUX_DATA(VIO0_D13_PORT26_MARK, PORT26_FN1, MSEL5CR_27_0),
PINMUX_DATA(VIO1_D5_MARK, PORT26_FN5),
PINMUX_DATA(SCIFA6_TXD_MARK, PORT26_FN6),
/* Port27 - Port39 Function */
PINMUX_DATA(VIO0_D7_MARK, PORT27_FN1),
PINMUX_DATA(VIO0_D6_MARK, PORT28_FN1),
PINMUX_DATA(VIO0_D5_MARK, PORT29_FN1),
PINMUX_DATA(VIO0_D4_MARK, PORT30_FN1),
PINMUX_DATA(VIO0_D3_MARK, PORT31_FN1),
PINMUX_DATA(VIO0_D2_MARK, PORT32_FN1),
PINMUX_DATA(VIO0_D1_MARK, PORT33_FN1),
PINMUX_DATA(VIO0_D0_MARK, PORT34_FN1),
PINMUX_DATA(VIO0_CLK_MARK, PORT35_FN1),
PINMUX_DATA(VIO_CKO_MARK, PORT36_FN1),
PINMUX_DATA(VIO0_HD_MARK, PORT37_FN1),
PINMUX_DATA(VIO0_FIELD_MARK, PORT38_FN1),
PINMUX_DATA(VIO0_VD_MARK, PORT39_FN1),
/* Port38 IRQ */
PINMUX_DATA(IRQ25_MARK, PORT38_FN0),
/* Port40 */
PINMUX_DATA(LCD0_D18_PORT40_MARK, PORT40_FN4, MSEL5CR_6_0),
PINMUX_DATA(RSPI_CK_A_MARK, PORT40_FN6),
PINMUX_DATA(LCD1_LCLK_MARK, PORT40_FN7),
/* Port41 */
PINMUX_DATA(LCD0_D17_MARK, PORT41_FN1),
PINMUX_DATA(MSIOF2_SS1_MARK, PORT41_FN2),
PINMUX_DATA(IRQ31_PORT41_MARK, PORT41_FN0, MSEL1CR_31_1),
/* Port42 */
PINMUX_DATA(LCD0_D16_MARK, PORT42_FN1),
PINMUX_DATA(MSIOF2_MCK1_MARK, PORT42_FN2),
PINMUX_DATA(IRQ12_PORT42_MARK, PORT42_FN0, MSEL1CR_12_1),
/* Port43 */
PINMUX_DATA(LCD0_D15_MARK, PORT43_FN1),
PINMUX_DATA(MSIOF2_MCK0_MARK, PORT43_FN2),
PINMUX_DATA(KEYIN0_PORT43_MARK, PORT43_FN3, MSEL4CR_18_0),
PINMUX_DATA(DV_D15_MARK, PORT43_FN6),
/* Port44 */
PINMUX_DATA(LCD0_D14_MARK, PORT44_FN1),
PINMUX_DATA(MSIOF2_RSYNC_MARK, PORT44_FN2),
PINMUX_DATA(KEYIN1_PORT44_MARK, PORT44_FN3, MSEL4CR_18_0),
PINMUX_DATA(DV_D14_MARK, PORT44_FN6),
/* Port45 */
PINMUX_DATA(LCD0_D13_MARK, PORT45_FN1),
PINMUX_DATA(MSIOF2_RSCK_MARK, PORT45_FN2),
PINMUX_DATA(KEYIN2_PORT45_MARK, PORT45_FN3, MSEL4CR_18_0),
PINMUX_DATA(DV_D13_MARK, PORT45_FN6),
/* Port46 */
PINMUX_DATA(LCD0_D12_MARK, PORT46_FN1),
PINMUX_DATA(KEYIN3_PORT46_MARK, PORT46_FN3, MSEL4CR_18_0),
PINMUX_DATA(DV_D12_MARK, PORT46_FN6),
/* Port47 */
PINMUX_DATA(LCD0_D11_MARK, PORT47_FN1),
PINMUX_DATA(KEYIN4_MARK, PORT47_FN3),
PINMUX_DATA(DV_D11_MARK, PORT47_FN6),
/* Port48 */
PINMUX_DATA(LCD0_D10_MARK, PORT48_FN1),
PINMUX_DATA(KEYIN5_MARK, PORT48_FN3),
PINMUX_DATA(DV_D10_MARK, PORT48_FN6),
/* Port49 */
PINMUX_DATA(LCD0_D9_MARK, PORT49_FN1),
PINMUX_DATA(KEYIN6_MARK, PORT49_FN3),
PINMUX_DATA(DV_D9_MARK, PORT49_FN6),
PINMUX_DATA(IRQ30_PORT49_MARK, PORT49_FN0, MSEL1CR_30_1),
/* Port50 */
PINMUX_DATA(LCD0_D8_MARK, PORT50_FN1),
PINMUX_DATA(KEYIN7_MARK, PORT50_FN3),
PINMUX_DATA(DV_D8_MARK, PORT50_FN6),
PINMUX_DATA(IRQ29_PORT50_MARK, PORT50_FN0, MSEL1CR_29_1),
/* Port51 */
PINMUX_DATA(LCD0_D7_MARK, PORT51_FN1),
PINMUX_DATA(KEYOUT0_MARK, PORT51_FN3),
PINMUX_DATA(DV_D7_MARK, PORT51_FN6),
/* Port52 */
PINMUX_DATA(LCD0_D6_MARK, PORT52_FN1),
PINMUX_DATA(KEYOUT1_MARK, PORT52_FN3),
PINMUX_DATA(DV_D6_MARK, PORT52_FN6),
/* Port53 */
PINMUX_DATA(LCD0_D5_MARK, PORT53_FN1),
PINMUX_DATA(KEYOUT2_MARK, PORT53_FN3),
PINMUX_DATA(DV_D5_MARK, PORT53_FN6),
/* Port54 */
PINMUX_DATA(LCD0_D4_MARK, PORT54_FN1),
PINMUX_DATA(KEYOUT3_MARK, PORT54_FN3),
PINMUX_DATA(DV_D4_MARK, PORT54_FN6),
/* Port55 */
PINMUX_DATA(LCD0_D3_MARK, PORT55_FN1),
PINMUX_DATA(KEYOUT4_MARK, PORT55_FN3),
PINMUX_DATA(KEYIN3_PORT55_MARK, PORT55_FN4, MSEL4CR_18_1),
PINMUX_DATA(DV_D3_MARK, PORT55_FN6),
/* Port56 */
PINMUX_DATA(LCD0_D2_MARK, PORT56_FN1),
PINMUX_DATA(KEYOUT5_MARK, PORT56_FN3),
PINMUX_DATA(KEYIN2_PORT56_MARK, PORT56_FN4, MSEL4CR_18_1),
PINMUX_DATA(DV_D2_MARK, PORT56_FN6),
PINMUX_DATA(IRQ28_PORT56_MARK, PORT56_FN0, MSEL1CR_28_1),
/* Port57 */
PINMUX_DATA(LCD0_D1_MARK, PORT57_FN1),
PINMUX_DATA(KEYOUT6_MARK, PORT57_FN3),
PINMUX_DATA(KEYIN1_PORT57_MARK, PORT57_FN4, MSEL4CR_18_1),
PINMUX_DATA(DV_D1_MARK, PORT57_FN6),
PINMUX_DATA(IRQ27_PORT57_MARK, PORT57_FN0, MSEL1CR_27_1),
/* Port58 */
PINMUX_DATA(LCD0_D0_MARK, PORT58_FN1),
PINMUX_DATA(KEYOUT7_MARK, PORT58_FN3),
PINMUX_DATA(KEYIN0_PORT58_MARK, PORT58_FN4, MSEL4CR_18_1),
PINMUX_DATA(DV_D0_MARK, PORT58_FN6),
PINMUX_DATA(IRQ26_PORT58_MARK, PORT58_FN0, MSEL1CR_26_1),
/* Port59 */
PINMUX_DATA(LCD0_VCPWC_MARK, PORT59_FN1),
PINMUX_DATA(BBIF2_TSCK2_PORT59_MARK, PORT59_FN2, MSEL5CR_0_0),
PINMUX_DATA(RSPI_MOSI_A_MARK, PORT59_FN6),
/* Port60 */
PINMUX_DATA(LCD0_VEPWC_MARK, PORT60_FN1),
PINMUX_DATA(BBIF2_RXD2_PORT60_MARK, PORT60_FN2, MSEL5CR_0_0),
PINMUX_DATA(RSPI_MISO_A_MARK, PORT60_FN6),
/* Port61 */
PINMUX_DATA(LCD0_DON_MARK, PORT61_FN1),
PINMUX_DATA(MSIOF2_TXD_MARK, PORT61_FN2),
/* Port62 */
PINMUX_DATA(LCD0_DCK_MARK, PORT62_FN1),
PINMUX_DATA(LCD0_WR_MARK, PORT62_FN4),
PINMUX_DATA(DV_CLK_MARK, PORT62_FN6),
PINMUX_DATA(IRQ15_PORT62_MARK, PORT62_FN0, MSEL1CR_15_1),
/* Port63 */
PINMUX_DATA(LCD0_VSYN_MARK, PORT63_FN1),
PINMUX_DATA(DV_VSYNC_MARK, PORT63_FN6),
PINMUX_DATA(IRQ14_PORT63_MARK, PORT63_FN0, MSEL1CR_14_1),
/* Port64 */
PINMUX_DATA(LCD0_HSYN_MARK, PORT64_FN1),
PINMUX_DATA(LCD0_CS_MARK, PORT64_FN4),
PINMUX_DATA(DV_HSYNC_MARK, PORT64_FN6),
PINMUX_DATA(IRQ13_PORT64_MARK, PORT64_FN0, MSEL1CR_13_1),
/* Port65 */
PINMUX_DATA(LCD0_DISP_MARK, PORT65_FN1),
PINMUX_DATA(MSIOF2_TSCK_MARK, PORT65_FN2),
PINMUX_DATA(LCD0_RS_MARK, PORT65_FN4),
/* Port66 */
PINMUX_DATA(MEMC_INT_MARK, PORT66_FN1),
PINMUX_DATA(TPU0TO2_PORT66_MARK, PORT66_FN3, MSEL5CR_25_0),
PINMUX_DATA(MMC0_CLK_PORT66_MARK, PORT66_FN4, MSEL4CR_15_0),
PINMUX_DATA(SDHI1_CLK_MARK, PORT66_FN6),
/* Port67 - Port73 Function1 */
PINMUX_DATA(MEMC_CS0_MARK, PORT67_FN1),
PINMUX_DATA(MEMC_AD8_MARK, PORT68_FN1),
PINMUX_DATA(MEMC_AD9_MARK, PORT69_FN1),
PINMUX_DATA(MEMC_AD10_MARK, PORT70_FN1),
PINMUX_DATA(MEMC_AD11_MARK, PORT71_FN1),
PINMUX_DATA(MEMC_AD12_MARK, PORT72_FN1),
PINMUX_DATA(MEMC_AD13_MARK, PORT73_FN1),
/* Port67 - Port73 Function2 */
PINMUX_DATA(MSIOF1_SS1_PORT67_MARK, PORT67_FN2, MSEL4CR_10_1),
PINMUX_DATA(MSIOF1_RSCK_MARK, PORT68_FN2),
PINMUX_DATA(MSIOF1_RSYNC_MARK, PORT69_FN2),
PINMUX_DATA(MSIOF1_MCK0_MARK, PORT70_FN2),
PINMUX_DATA(MSIOF1_MCK1_MARK, PORT71_FN2),
PINMUX_DATA(MSIOF1_TSCK_PORT72_MARK, PORT72_FN2, MSEL4CR_10_1),
PINMUX_DATA(MSIOF1_TSYNC_PORT73_MARK, PORT73_FN2, MSEL4CR_10_1),
/* Port67 - Port73 Function4 */
PINMUX_DATA(MMC0_CMD_PORT67_MARK, PORT67_FN4, MSEL4CR_15_0),
PINMUX_DATA(MMC0_D0_PORT68_MARK, PORT68_FN4, MSEL4CR_15_0),
PINMUX_DATA(MMC0_D1_PORT69_MARK, PORT69_FN4, MSEL4CR_15_0),
PINMUX_DATA(MMC0_D2_PORT70_MARK, PORT70_FN4, MSEL4CR_15_0),
PINMUX_DATA(MMC0_D3_PORT71_MARK, PORT71_FN4, MSEL4CR_15_0),
PINMUX_DATA(MMC0_D4_PORT72_MARK, PORT72_FN4, MSEL4CR_15_0),
PINMUX_DATA(MMC0_D5_PORT73_MARK, PORT73_FN4, MSEL4CR_15_0),
/* Port67 - Port73 Function6 */
PINMUX_DATA(SDHI1_CMD_MARK, PORT67_FN6),
PINMUX_DATA(SDHI1_D0_MARK, PORT68_FN6),
PINMUX_DATA(SDHI1_D1_MARK, PORT69_FN6),
PINMUX_DATA(SDHI1_D2_MARK, PORT70_FN6),
PINMUX_DATA(SDHI1_D3_MARK, PORT71_FN6),
PINMUX_DATA(SDHI1_CD_MARK, PORT72_FN6),
PINMUX_DATA(SDHI1_WP_MARK, PORT73_FN6),
/* Port67 - Port71 IRQ */
PINMUX_DATA(IRQ20_MARK, PORT67_FN0),
PINMUX_DATA(IRQ16_PORT68_MARK, PORT68_FN0, MSEL1CR_16_0),
PINMUX_DATA(IRQ17_MARK, PORT69_FN0),
PINMUX_DATA(IRQ18_MARK, PORT70_FN0),
PINMUX_DATA(IRQ19_MARK, PORT71_FN0),
/* Port74 */
PINMUX_DATA(MEMC_AD14_MARK, PORT74_FN1),
PINMUX_DATA(MSIOF1_TXD_PORT74_MARK, PORT74_FN2, MSEL4CR_10_1),
PINMUX_DATA(MMC0_D6_PORT74_MARK, PORT74_FN4, MSEL4CR_15_0),
PINMUX_DATA(STP1_IPD7_MARK, PORT74_FN6),
PINMUX_DATA(LCD1_D21_MARK, PORT74_FN7),
/* Port75 */
PINMUX_DATA(MEMC_AD15_MARK, PORT75_FN1),
PINMUX_DATA(MSIOF1_RXD_PORT75_MARK, PORT75_FN2, MSEL4CR_10_1),
PINMUX_DATA(MMC0_D7_PORT75_MARK, PORT75_FN4, MSEL4CR_15_0),
PINMUX_DATA(STP1_IPD6_MARK, PORT75_FN6),
PINMUX_DATA(LCD1_D20_MARK, PORT75_FN7),
/* Port76 - Port80 Function */
PINMUX_DATA(SDHI0_CMD_MARK, PORT76_FN1),
PINMUX_DATA(SDHI0_D0_MARK, PORT77_FN1),
PINMUX_DATA(SDHI0_D1_MARK, PORT78_FN1),
PINMUX_DATA(SDHI0_D2_MARK, PORT79_FN1),
PINMUX_DATA(SDHI0_D3_MARK, PORT80_FN1),
/* Port81 */
PINMUX_DATA(SDHI0_CD_MARK, PORT81_FN1),
PINMUX_DATA(IRQ26_PORT81_MARK, PORT81_FN0, MSEL1CR_26_0),
/* Port82 - Port88 Function */
PINMUX_DATA(SDHI0_CLK_MARK, PORT82_FN1),
PINMUX_DATA(SDHI0_WP_MARK, PORT83_FN1),
PINMUX_DATA(RESETOUTS_MARK, PORT84_FN1),
PINMUX_DATA(USB0_PPON_MARK, PORT85_FN1),
PINMUX_DATA(USB0_OCI_MARK, PORT86_FN1),
PINMUX_DATA(USB1_PPON_MARK, PORT87_FN1),
PINMUX_DATA(USB1_OCI_MARK, PORT88_FN1),
/* Port89 */
PINMUX_DATA(DREQ0_MARK, PORT89_FN1),
PINMUX_DATA(BBIF2_TSCK2_PORT89_MARK, PORT89_FN2, MSEL5CR_0_1),
PINMUX_DATA(RSPI_SSL3_A_MARK, PORT89_FN6),
/* Port90 */
PINMUX_DATA(DACK0_MARK, PORT90_FN1),
PINMUX_DATA(BBIF2_RXD2_PORT90_MARK, PORT90_FN2, MSEL5CR_0_1),
PINMUX_DATA(RSPI_SSL2_A_MARK, PORT90_FN6),
PINMUX_DATA(WAIT_PORT90_MARK, PORT90_FN7, MSEL5CR_2_1),
/* Port91 */
PINMUX_DATA(MEMC_AD0_MARK, PORT91_FN1),
PINMUX_DATA(BBIF1_RXD_MARK, PORT91_FN2),
PINMUX_DATA(SCIFA5_TXD_PORT91_MARK, PORT91_FN3, MSEL5CR_15_1, MSEL5CR_14_0),
PINMUX_DATA(LCD1_D5_MARK, PORT91_FN7),
/* Port92 */
PINMUX_DATA(MEMC_AD1_MARK, PORT92_FN1),
PINMUX_DATA(BBIF1_TSYNC_MARK, PORT92_FN2),
PINMUX_DATA(SCIFA5_RXD_PORT92_MARK, PORT92_FN3, MSEL5CR_15_1, MSEL5CR_14_0),
PINMUX_DATA(STP0_IPD1_MARK, PORT92_FN6),
PINMUX_DATA(LCD1_D6_MARK, PORT92_FN7),
/* Port93 */
PINMUX_DATA(MEMC_AD2_MARK, PORT93_FN1),
PINMUX_DATA(BBIF1_TSCK_MARK, PORT93_FN2),
PINMUX_DATA(SCIFA4_TXD_PORT93_MARK, PORT93_FN3, MSEL5CR_12_1, MSEL5CR_11_0),
PINMUX_DATA(STP0_IPD3_MARK, PORT93_FN6),
PINMUX_DATA(LCD1_D8_MARK, PORT93_FN7),
/* Port94 */
PINMUX_DATA(MEMC_AD3_MARK, PORT94_FN1),
PINMUX_DATA(BBIF1_TXD_MARK, PORT94_FN2),
PINMUX_DATA(SCIFA4_RXD_PORT94_MARK, PORT94_FN3, MSEL5CR_12_1, MSEL5CR_11_0),
PINMUX_DATA(STP0_IPD4_MARK, PORT94_FN6),
PINMUX_DATA(LCD1_D9_MARK, PORT94_FN7),
/* Port95 */
PINMUX_DATA(MEMC_CS1_MARK, PORT95_FN1, MSEL4CR_6_0),
PINMUX_DATA(MEMC_A1_MARK, PORT95_FN1, MSEL4CR_6_1),
PINMUX_DATA(SCIFA2_CTS_MARK, PORT95_FN2),
PINMUX_DATA(SIM_RST_MARK, PORT95_FN4),
PINMUX_DATA(VIO0_D14_PORT95_MARK, PORT95_FN7, MSEL5CR_27_1),
PINMUX_DATA(IRQ22_MARK, PORT95_FN0),
/* Port96 */
PINMUX_DATA(MEMC_ADV_MARK, PORT96_FN1, MSEL4CR_6_0),
PINMUX_DATA(MEMC_DREQ0_MARK, PORT96_FN1, MSEL4CR_6_1),
PINMUX_DATA(SCIFA2_RTS_MARK, PORT96_FN2),
PINMUX_DATA(SIM_CLK_MARK, PORT96_FN4),
PINMUX_DATA(VIO0_D15_PORT96_MARK, PORT96_FN7, MSEL5CR_27_1),
PINMUX_DATA(IRQ23_MARK, PORT96_FN0),
/* Port97 */
PINMUX_DATA(MEMC_AD4_MARK, PORT97_FN1),
PINMUX_DATA(BBIF1_RSCK_MARK, PORT97_FN2),
PINMUX_DATA(LCD1_CS_MARK, PORT97_FN6),
PINMUX_DATA(LCD1_HSYN_MARK, PORT97_FN7),
PINMUX_DATA(IRQ12_PORT97_MARK, PORT97_FN0, MSEL1CR_12_0),
/* Port98 */
PINMUX_DATA(MEMC_AD5_MARK, PORT98_FN1),
PINMUX_DATA(BBIF1_RSYNC_MARK, PORT98_FN2),
PINMUX_DATA(LCD1_VSYN_MARK, PORT98_FN7),
PINMUX_DATA(IRQ13_PORT98_MARK, PORT98_FN0, MSEL1CR_13_0),
/* Port99 */
PINMUX_DATA(MEMC_AD6_MARK, PORT99_FN1),
PINMUX_DATA(BBIF1_FLOW_MARK, PORT99_FN2),
PINMUX_DATA(LCD1_WR_MARK, PORT99_FN6),
PINMUX_DATA(LCD1_DCK_MARK, PORT99_FN7),
PINMUX_DATA(IRQ14_PORT99_MARK, PORT99_FN0, MSEL1CR_14_0),
/* Port100 */
PINMUX_DATA(MEMC_AD7_MARK, PORT100_FN1),
PINMUX_DATA(BBIF1_RX_FLOW_N_MARK, PORT100_FN2),
PINMUX_DATA(LCD1_DON_MARK, PORT100_FN7),
PINMUX_DATA(IRQ15_PORT100_MARK, PORT100_FN0, MSEL1CR_15_0),
/* Port101 */
PINMUX_DATA(FCE0_MARK, PORT101_FN1),
/* Port102 */
PINMUX_DATA(FRB_MARK, PORT102_FN1),
PINMUX_DATA(LCD0_LCLK_PORT102_MARK, PORT102_FN4, MSEL5CR_6_0),
/* Port103 */
PINMUX_DATA(CS5B_MARK, PORT103_FN1),
PINMUX_DATA(FCE1_MARK, PORT103_FN2),
PINMUX_DATA(MMC1_CLK_PORT103_MARK, PORT103_FN3, MSEL4CR_15_1),
/* Port104 */
PINMUX_DATA(CS6A_MARK, PORT104_FN1),
PINMUX_DATA(MMC1_CMD_PORT104_MARK, PORT104_FN3, MSEL4CR_15_1),
PINMUX_DATA(IRQ11_MARK, PORT104_FN0),
/* Port105 */
PINMUX_DATA(CS5A_PORT105_MARK, PORT105_FN1, MSEL5CR_2_0),
PINMUX_DATA(SCIFA3_RTS_PORT105_MARK, PORT105_FN4, MSEL5CR_8_0),
/* Port106 */
PINMUX_DATA(IOIS16_MARK, PORT106_FN1),
PINMUX_DATA(IDE_EXBUF_ENB_MARK, PORT106_FN6),
/* Port107 - Port115 Function */
PINMUX_DATA(WE3_ICIOWR_MARK, PORT107_FN1),
PINMUX_DATA(WE2_ICIORD_MARK, PORT108_FN1),
PINMUX_DATA(CS0_MARK, PORT109_FN1),
PINMUX_DATA(CS2_MARK, PORT110_FN1),
PINMUX_DATA(CS4_MARK, PORT111_FN1),
PINMUX_DATA(WE1_MARK, PORT112_FN1),
PINMUX_DATA(WE0_FWE_MARK, PORT113_FN1),
PINMUX_DATA(RDWR_MARK, PORT114_FN1),
PINMUX_DATA(RD_FSC_MARK, PORT115_FN1),
/* Port116 */
PINMUX_DATA(A25_MARK, PORT116_FN1),
PINMUX_DATA(MSIOF0_SS2_MARK, PORT116_FN2),
PINMUX_DATA(MSIOF1_SS2_PORT116_MARK, PORT116_FN3, MSEL4CR_10_0),
PINMUX_DATA(SCIFA3_SCK_PORT116_MARK, PORT116_FN4, MSEL5CR_8_0),
PINMUX_DATA(GPO1_MARK, PORT116_FN5),
/* Port117 */
PINMUX_DATA(A24_MARK, PORT117_FN1),
PINMUX_DATA(MSIOF0_SS1_MARK, PORT117_FN2),
PINMUX_DATA(MSIOF1_SS1_PORT117_MARK, PORT117_FN3, MSEL4CR_10_0),
PINMUX_DATA(SCIFA3_CTS_PORT117_MARK, PORT117_FN4, MSEL5CR_8_0),
PINMUX_DATA(GPO0_MARK, PORT117_FN5),
/* Port118 */
PINMUX_DATA(A23_MARK, PORT118_FN1),
PINMUX_DATA(MSIOF0_MCK1_MARK, PORT118_FN2),
PINMUX_DATA(MSIOF1_RXD_PORT118_MARK, PORT118_FN3, MSEL4CR_10_0),
PINMUX_DATA(GPI1_MARK, PORT118_FN5),
PINMUX_DATA(IRQ9_PORT118_MARK, PORT118_FN0, MSEL1CR_9_0),
/* Port119 */
PINMUX_DATA(A22_MARK, PORT119_FN1),
PINMUX_DATA(MSIOF0_MCK0_MARK, PORT119_FN2),
PINMUX_DATA(MSIOF1_TXD_PORT119_MARK, PORT119_FN3, MSEL4CR_10_0),
PINMUX_DATA(GPI0_MARK, PORT119_FN5),
PINMUX_DATA(IRQ8_MARK, PORT119_FN0),
/* Port120 */
PINMUX_DATA(A21_MARK, PORT120_FN1),
PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT120_FN2),
PINMUX_DATA(MSIOF1_TSYNC_PORT120_MARK, PORT120_FN3, MSEL4CR_10_0),
PINMUX_DATA(IRQ7_PORT120_MARK, PORT120_FN0, MSEL1CR_7_0),
/* Port121 */
PINMUX_DATA(A20_MARK, PORT121_FN1),
PINMUX_DATA(MSIOF0_RSCK_MARK, PORT121_FN2),
PINMUX_DATA(MSIOF1_TSCK_PORT121_MARK, PORT121_FN3, MSEL4CR_10_0),
PINMUX_DATA(IRQ6_PORT121_MARK, PORT121_FN0, MSEL1CR_6_0),
/* Port122 */
PINMUX_DATA(A19_MARK, PORT122_FN1),
PINMUX_DATA(MSIOF0_RXD_MARK, PORT122_FN2),
/* Port123 */
PINMUX_DATA(A18_MARK, PORT123_FN1),
PINMUX_DATA(MSIOF0_TSCK_MARK, PORT123_FN2),
/* Port124 */
PINMUX_DATA(A17_MARK, PORT124_FN1),
PINMUX_DATA(MSIOF0_TSYNC_MARK, PORT124_FN2),
/* Port125 - Port141 Function */
PINMUX_DATA(A16_MARK, PORT125_FN1),
PINMUX_DATA(A15_MARK, PORT126_FN1),
PINMUX_DATA(A14_MARK, PORT127_FN1),
PINMUX_DATA(A13_MARK, PORT128_FN1),
PINMUX_DATA(A12_MARK, PORT129_FN1),
PINMUX_DATA(A11_MARK, PORT130_FN1),
PINMUX_DATA(A10_MARK, PORT131_FN1),
PINMUX_DATA(A9_MARK, PORT132_FN1),
PINMUX_DATA(A8_MARK, PORT133_FN1),
PINMUX_DATA(A7_MARK, PORT134_FN1),
PINMUX_DATA(A6_MARK, PORT135_FN1),
PINMUX_DATA(A5_FCDE_MARK, PORT136_FN1),
PINMUX_DATA(A4_FOE_MARK, PORT137_FN1),
PINMUX_DATA(A3_MARK, PORT138_FN1),
PINMUX_DATA(A2_MARK, PORT139_FN1),
PINMUX_DATA(A1_MARK, PORT140_FN1),
PINMUX_DATA(CKO_MARK, PORT141_FN1),
/* Port142 - Port157 Function1 */
PINMUX_DATA(D15_NAF15_MARK, PORT142_FN1),
PINMUX_DATA(D14_NAF14_MARK, PORT143_FN1),
PINMUX_DATA(D13_NAF13_MARK, PORT144_FN1),
PINMUX_DATA(D12_NAF12_MARK, PORT145_FN1),
PINMUX_DATA(D11_NAF11_MARK, PORT146_FN1),
PINMUX_DATA(D10_NAF10_MARK, PORT147_FN1),
PINMUX_DATA(D9_NAF9_MARK, PORT148_FN1),
PINMUX_DATA(D8_NAF8_MARK, PORT149_FN1),
PINMUX_DATA(D7_NAF7_MARK, PORT150_FN1),
PINMUX_DATA(D6_NAF6_MARK, PORT151_FN1),
PINMUX_DATA(D5_NAF5_MARK, PORT152_FN1),
PINMUX_DATA(D4_NAF4_MARK, PORT153_FN1),
PINMUX_DATA(D3_NAF3_MARK, PORT154_FN1),
PINMUX_DATA(D2_NAF2_MARK, PORT155_FN1),
PINMUX_DATA(D1_NAF1_MARK, PORT156_FN1),
PINMUX_DATA(D0_NAF0_MARK, PORT157_FN1),
/* Port142 - Port149 Function3 */
PINMUX_DATA(MMC1_D7_PORT142_MARK, PORT142_FN3, MSEL4CR_15_1),
PINMUX_DATA(MMC1_D6_PORT143_MARK, PORT143_FN3, MSEL4CR_15_1),
PINMUX_DATA(MMC1_D5_PORT144_MARK, PORT144_FN3, MSEL4CR_15_1),
PINMUX_DATA(MMC1_D4_PORT145_MARK, PORT145_FN3, MSEL4CR_15_1),
PINMUX_DATA(MMC1_D3_PORT146_MARK, PORT146_FN3, MSEL4CR_15_1),
PINMUX_DATA(MMC1_D2_PORT147_MARK, PORT147_FN3, MSEL4CR_15_1),
PINMUX_DATA(MMC1_D1_PORT148_MARK, PORT148_FN3, MSEL4CR_15_1),
PINMUX_DATA(MMC1_D0_PORT149_MARK, PORT149_FN3, MSEL4CR_15_1),
/* Port158 */
PINMUX_DATA(D31_MARK, PORT158_FN1),
PINMUX_DATA(SCIFA3_SCK_PORT158_MARK, PORT158_FN2, MSEL5CR_8_1),
PINMUX_DATA(RMII_REF125CK_MARK, PORT158_FN3),
PINMUX_DATA(LCD0_D21_PORT158_MARK, PORT158_FN4, MSEL5CR_6_1),
PINMUX_DATA(IRDA_FIRSEL_MARK, PORT158_FN5),
PINMUX_DATA(IDE_D15_MARK, PORT158_FN6),
/* Port159 */
PINMUX_DATA(D30_MARK, PORT159_FN1),
PINMUX_DATA(SCIFA3_RXD_PORT159_MARK, PORT159_FN2, MSEL5CR_8_1),
PINMUX_DATA(RMII_REF50CK_MARK, PORT159_FN3),
PINMUX_DATA(LCD0_D23_PORT159_MARK, PORT159_FN4, MSEL5CR_6_1),
PINMUX_DATA(IDE_D14_MARK, PORT159_FN6),
/* Port160 */
PINMUX_DATA(D29_MARK, PORT160_FN1),
PINMUX_DATA(SCIFA3_TXD_PORT160_MARK, PORT160_FN2, MSEL5CR_8_1),
PINMUX_DATA(LCD0_D22_PORT160_MARK, PORT160_FN4, MSEL5CR_6_1),
PINMUX_DATA(VIO1_HD_MARK, PORT160_FN5),
PINMUX_DATA(IDE_D13_MARK, PORT160_FN6),
/* Port161 */
PINMUX_DATA(D28_MARK, PORT161_FN1),
PINMUX_DATA(SCIFA3_RTS_PORT161_MARK, PORT161_FN2, MSEL5CR_8_1),
PINMUX_DATA(ET_RX_DV_MARK, PORT161_FN3),
PINMUX_DATA(LCD0_D20_PORT161_MARK, PORT161_FN4, MSEL5CR_6_1),
PINMUX_DATA(IRDA_IN_MARK, PORT161_FN5),
PINMUX_DATA(IDE_D12_MARK, PORT161_FN6),
/* Port162 */
PINMUX_DATA(D27_MARK, PORT162_FN1),
PINMUX_DATA(SCIFA3_CTS_PORT162_MARK, PORT162_FN2, MSEL5CR_8_1),
PINMUX_DATA(LCD0_D19_PORT162_MARK, PORT162_FN4, MSEL5CR_6_1),
PINMUX_DATA(IRDA_OUT_MARK, PORT162_FN5),
PINMUX_DATA(IDE_D11_MARK, PORT162_FN6),
/* Port163 */
PINMUX_DATA(D26_MARK, PORT163_FN1),
PINMUX_DATA(MSIOF2_SS2_MARK, PORT163_FN2),
PINMUX_DATA(ET_COL_MARK, PORT163_FN3),
PINMUX_DATA(LCD0_D18_PORT163_MARK, PORT163_FN4, MSEL5CR_6_1),
PINMUX_DATA(IROUT_MARK, PORT163_FN5),
PINMUX_DATA(IDE_D10_MARK, PORT163_FN6),
/* Port164 */
PINMUX_DATA(D25_MARK, PORT164_FN1),
PINMUX_DATA(MSIOF2_TSYNC_MARK, PORT164_FN2),
PINMUX_DATA(ET_PHY_INT_MARK, PORT164_FN3),
PINMUX_DATA(LCD0_RD_MARK, PORT164_FN4),
PINMUX_DATA(IDE_D9_MARK, PORT164_FN6),
/* Port165 */
PINMUX_DATA(D24_MARK, PORT165_FN1),
PINMUX_DATA(MSIOF2_RXD_MARK, PORT165_FN2),
PINMUX_DATA(LCD0_LCLK_PORT165_MARK, PORT165_FN4, MSEL5CR_6_1),
PINMUX_DATA(IDE_D8_MARK, PORT165_FN6),
/* Port166 - Port171 Function1 */
PINMUX_DATA(D21_MARK, PORT166_FN1),
PINMUX_DATA(D20_MARK, PORT167_FN1),
PINMUX_DATA(D19_MARK, PORT168_FN1),
PINMUX_DATA(D18_MARK, PORT169_FN1),
PINMUX_DATA(D17_MARK, PORT170_FN1),
PINMUX_DATA(D16_MARK, PORT171_FN1),
/* Port166 - Port171 Function3 */
PINMUX_DATA(ET_ETXD5_MARK, PORT166_FN3),
PINMUX_DATA(ET_ETXD4_MARK, PORT167_FN3),
PINMUX_DATA(ET_ETXD3_MARK, PORT168_FN3),
PINMUX_DATA(ET_ETXD2_MARK, PORT169_FN3),
PINMUX_DATA(ET_ETXD1_MARK, PORT170_FN3),
PINMUX_DATA(ET_ETXD0_MARK, PORT171_FN3),
/* Port166 - Port171 Function6 */
PINMUX_DATA(IDE_D5_MARK, PORT166_FN6),
PINMUX_DATA(IDE_D4_MARK, PORT167_FN6),
PINMUX_DATA(IDE_D3_MARK, PORT168_FN6),
PINMUX_DATA(IDE_D2_MARK, PORT169_FN6),
PINMUX_DATA(IDE_D1_MARK, PORT170_FN6),
PINMUX_DATA(IDE_D0_MARK, PORT171_FN6),
/* Port167 - Port171 IRQ */
PINMUX_DATA(IRQ31_PORT167_MARK, PORT167_FN0, MSEL1CR_31_0),
PINMUX_DATA(IRQ27_PORT168_MARK, PORT168_FN0, MSEL1CR_27_0),
PINMUX_DATA(IRQ28_PORT169_MARK, PORT169_FN0, MSEL1CR_28_0),
PINMUX_DATA(IRQ29_PORT170_MARK, PORT170_FN0, MSEL1CR_29_0),
PINMUX_DATA(IRQ30_PORT171_MARK, PORT171_FN0, MSEL1CR_30_0),
/* Port172 */
PINMUX_DATA(D23_MARK, PORT172_FN1),
PINMUX_DATA(SCIFB_RTS_PORT172_MARK, PORT172_FN2, MSEL5CR_17_1),
PINMUX_DATA(ET_ETXD7_MARK, PORT172_FN3),
PINMUX_DATA(IDE_D7_MARK, PORT172_FN6),
PINMUX_DATA(IRQ4_PORT172_MARK, PORT172_FN0, MSEL1CR_4_1),
/* Port173 */
PINMUX_DATA(D22_MARK, PORT173_FN1),
PINMUX_DATA(SCIFB_CTS_PORT173_MARK, PORT173_FN2, MSEL5CR_17_1),
PINMUX_DATA(ET_ETXD6_MARK, PORT173_FN3),
PINMUX_DATA(IDE_D6_MARK, PORT173_FN6),
PINMUX_DATA(IRQ6_PORT173_MARK, PORT173_FN0, MSEL1CR_6_1),
/* Port174 */
PINMUX_DATA(A26_MARK, PORT174_FN1),
PINMUX_DATA(MSIOF0_TXD_MARK, PORT174_FN2),
PINMUX_DATA(ET_RX_CLK_MARK, PORT174_FN3),
PINMUX_DATA(SCIFA3_RXD_PORT174_MARK, PORT174_FN4, MSEL5CR_8_0),
/* Port175 */
PINMUX_DATA(A0_MARK, PORT175_FN1),
PINMUX_DATA(BS_MARK, PORT175_FN2),
PINMUX_DATA(ET_WOL_MARK, PORT175_FN3),
PINMUX_DATA(SCIFA3_TXD_PORT175_MARK, PORT175_FN4, MSEL5CR_8_0),
/* Port176 */
PINMUX_DATA(ET_GTX_CLK_MARK, PORT176_FN3),
/* Port177 */
PINMUX_DATA(WAIT_PORT177_MARK, PORT177_FN1, MSEL5CR_2_0),
PINMUX_DATA(ET_LINK_MARK, PORT177_FN3),
PINMUX_DATA(IDE_IOWR_MARK, PORT177_FN6),
PINMUX_DATA(SDHI2_WP_PORT177_MARK, PORT177_FN7, MSEL5CR_19_1),
/* Port178 */
PINMUX_DATA(VIO0_D12_MARK, PORT178_FN1),
PINMUX_DATA(VIO1_D4_MARK, PORT178_FN5),
PINMUX_DATA(IDE_IORD_MARK, PORT178_FN6),
/* Port179 */
PINMUX_DATA(VIO0_D11_MARK, PORT179_FN1),
PINMUX_DATA(VIO1_D3_MARK, PORT179_FN5),
PINMUX_DATA(IDE_IORDY_MARK, PORT179_FN6),
/* Port180 */
PINMUX_DATA(VIO0_D10_MARK, PORT180_FN1),
PINMUX_DATA(TPU0TO3_MARK, PORT180_FN4),
PINMUX_DATA(VIO1_D2_MARK, PORT180_FN5),
PINMUX_DATA(IDE_INT_MARK, PORT180_FN6),
PINMUX_DATA(IRQ24_MARK, PORT180_FN0),
/* Port181 */
PINMUX_DATA(VIO0_D9_MARK, PORT181_FN1),
PINMUX_DATA(VIO1_D1_MARK, PORT181_FN5),
PINMUX_DATA(IDE_RST_MARK, PORT181_FN6),
/* Port182 */
PINMUX_DATA(VIO0_D8_MARK, PORT182_FN1),
PINMUX_DATA(VIO1_D0_MARK, PORT182_FN5),
PINMUX_DATA(IDE_DIRECTION_MARK, PORT182_FN6),
/* Port183 */
PINMUX_DATA(DREQ1_MARK, PORT183_FN1),
PINMUX_DATA(BBIF2_TXD2_PORT183_MARK, PORT183_FN2, MSEL5CR_0_1),
PINMUX_DATA(ET_TX_EN_MARK, PORT183_FN3),
/* Port184 */
PINMUX_DATA(DACK1_MARK, PORT184_FN1),
PINMUX_DATA(BBIF2_TSYNC2_PORT184_MARK, PORT184_FN2, MSEL5CR_0_1),
PINMUX_DATA(ET_TX_CLK_MARK, PORT184_FN3),
/* Port185 - Port192 Function1 */
PINMUX_DATA(SCIFA1_SCK_MARK, PORT185_FN1),
PINMUX_DATA(SCIFB_RTS_PORT186_MARK, PORT186_FN1, MSEL5CR_17_0),
PINMUX_DATA(SCIFB_CTS_PORT187_MARK, PORT187_FN1, MSEL5CR_17_0),
PINMUX_DATA(SCIFA0_SCK_MARK, PORT188_FN1),
PINMUX_DATA(SCIFB_SCK_PORT190_MARK, PORT190_FN1, MSEL5CR_17_0),
PINMUX_DATA(SCIFB_RXD_PORT191_MARK, PORT191_FN1, MSEL5CR_17_0),
PINMUX_DATA(SCIFB_TXD_PORT192_MARK, PORT192_FN1, MSEL5CR_17_0),
/* Port185 - Port192 Function3 */
PINMUX_DATA(ET_ERXD0_MARK, PORT185_FN3),
PINMUX_DATA(ET_ERXD1_MARK, PORT186_FN3),
PINMUX_DATA(ET_ERXD2_MARK, PORT187_FN3),
PINMUX_DATA(ET_ERXD3_MARK, PORT188_FN3),
PINMUX_DATA(ET_ERXD4_MARK, PORT189_FN3),
PINMUX_DATA(ET_ERXD5_MARK, PORT190_FN3),
PINMUX_DATA(ET_ERXD6_MARK, PORT191_FN3),
PINMUX_DATA(ET_ERXD7_MARK, PORT192_FN3),
/* Port185 - Port192 Function6 */
PINMUX_DATA(STP1_IPCLK_MARK, PORT185_FN6),
PINMUX_DATA(STP1_IPD0_PORT186_MARK, PORT186_FN6, MSEL5CR_23_0),
PINMUX_DATA(STP1_IPEN_PORT187_MARK, PORT187_FN6, MSEL5CR_23_0),
PINMUX_DATA(STP1_IPSYNC_MARK, PORT188_FN6),
PINMUX_DATA(STP0_IPCLK_MARK, PORT189_FN6),
PINMUX_DATA(STP0_IPD0_MARK, PORT190_FN6),
PINMUX_DATA(STP0_IPEN_MARK, PORT191_FN6),
PINMUX_DATA(STP0_IPSYNC_MARK, PORT192_FN6),
/* Port193 */
PINMUX_DATA(SCIFA0_CTS_MARK, PORT193_FN1),
PINMUX_DATA(RMII_CRS_DV_MARK, PORT193_FN3),
PINMUX_DATA(STP1_IPEN_PORT193_MARK, PORT193_FN6, MSEL5CR_23_1), /* ? */
PINMUX_DATA(LCD1_D17_MARK, PORT193_FN7),
/* Port194 */
PINMUX_DATA(SCIFA0_RTS_MARK, PORT194_FN1),
PINMUX_DATA(RMII_RX_ER_MARK, PORT194_FN3),
PINMUX_DATA(STP1_IPD0_PORT194_MARK, PORT194_FN6, MSEL5CR_23_1), /* ? */
PINMUX_DATA(LCD1_D16_MARK, PORT194_FN7),
/* Port195 */
PINMUX_DATA(SCIFA1_RXD_MARK, PORT195_FN1),
PINMUX_DATA(RMII_RXD0_MARK, PORT195_FN3),
PINMUX_DATA(STP1_IPD3_MARK, PORT195_FN6),
PINMUX_DATA(LCD1_D15_MARK, PORT195_FN7),
/* Port196 */
PINMUX_DATA(SCIFA1_TXD_MARK, PORT196_FN1),
PINMUX_DATA(RMII_RXD1_MARK, PORT196_FN3),
PINMUX_DATA(STP1_IPD2_MARK, PORT196_FN6),
PINMUX_DATA(LCD1_D14_MARK, PORT196_FN7),
/* Port197 */
PINMUX_DATA(SCIFA0_RXD_MARK, PORT197_FN1),
PINMUX_DATA(VIO1_CLK_MARK, PORT197_FN5),
PINMUX_DATA(STP1_IPD5_MARK, PORT197_FN6),
PINMUX_DATA(LCD1_D19_MARK, PORT197_FN7),
/* Port198 */
PINMUX_DATA(SCIFA0_TXD_MARK, PORT198_FN1),
PINMUX_DATA(VIO1_VD_MARK, PORT198_FN5),
PINMUX_DATA(STP1_IPD4_MARK, PORT198_FN6),
PINMUX_DATA(LCD1_D18_MARK, PORT198_FN7),
/* Port199 */
PINMUX_DATA(MEMC_NWE_MARK, PORT199_FN1),
PINMUX_DATA(SCIFA2_SCK_PORT199_MARK, PORT199_FN2, MSEL5CR_7_1),
PINMUX_DATA(RMII_TX_EN_MARK, PORT199_FN3),
PINMUX_DATA(SIM_D_PORT199_MARK, PORT199_FN4, MSEL5CR_21_1),
PINMUX_DATA(STP1_IPD1_MARK, PORT199_FN6),
PINMUX_DATA(LCD1_D13_MARK, PORT199_FN7),
/* Port200 */
PINMUX_DATA(MEMC_NOE_MARK, PORT200_FN1),
PINMUX_DATA(SCIFA2_RXD_MARK, PORT200_FN2),
PINMUX_DATA(RMII_TXD0_MARK, PORT200_FN3),
PINMUX_DATA(STP0_IPD7_MARK, PORT200_FN6),
PINMUX_DATA(LCD1_D12_MARK, PORT200_FN7),
/* Port201 */
PINMUX_DATA(MEMC_WAIT_MARK, PORT201_FN1, MSEL4CR_6_0),
PINMUX_DATA(MEMC_DREQ1_MARK, PORT201_FN1, MSEL4CR_6_1),
PINMUX_DATA(SCIFA2_TXD_MARK, PORT201_FN2),
PINMUX_DATA(RMII_TXD1_MARK, PORT201_FN3),
PINMUX_DATA(STP0_IPD6_MARK, PORT201_FN6),
PINMUX_DATA(LCD1_D11_MARK, PORT201_FN7),
/* Port202 */
PINMUX_DATA(MEMC_BUSCLK_MARK, PORT202_FN1, MSEL4CR_6_0),
PINMUX_DATA(MEMC_A0_MARK, PORT202_FN1, MSEL4CR_6_1),
PINMUX_DATA(MSIOF1_SS2_PORT202_MARK, PORT202_FN2, MSEL4CR_10_1),
PINMUX_DATA(RMII_MDC_MARK, PORT202_FN3),
PINMUX_DATA(TPU0TO2_PORT202_MARK, PORT202_FN4, MSEL5CR_25_1),
PINMUX_DATA(IDE_CS0_MARK, PORT202_FN6),
PINMUX_DATA(SDHI2_CD_PORT202_MARK, PORT202_FN7, MSEL5CR_19_1),
PINMUX_DATA(IRQ21_MARK, PORT202_FN0),
/* Port203 - Port208 Function1 */
PINMUX_DATA(SDHI2_CLK_MARK, PORT203_FN1),
PINMUX_DATA(SDHI2_CMD_MARK, PORT204_FN1),
PINMUX_DATA(SDHI2_D0_MARK, PORT205_FN1),
PINMUX_DATA(SDHI2_D1_MARK, PORT206_FN1),
PINMUX_DATA(SDHI2_D2_MARK, PORT207_FN1),
PINMUX_DATA(SDHI2_D3_MARK, PORT208_FN1),
/* Port203 - Port208 Function3 */
PINMUX_DATA(ET_TX_ER_MARK, PORT203_FN3),
PINMUX_DATA(ET_RX_ER_MARK, PORT204_FN3),
PINMUX_DATA(ET_CRS_MARK, PORT205_FN3),
PINMUX_DATA(ET_MDC_MARK, PORT206_FN3),
PINMUX_DATA(ET_MDIO_MARK, PORT207_FN3),
PINMUX_DATA(RMII_MDIO_MARK, PORT208_FN3),
/* Port203 - Port208 Function6 */
PINMUX_DATA(IDE_A2_MARK, PORT203_FN6),
PINMUX_DATA(IDE_A1_MARK, PORT204_FN6),
PINMUX_DATA(IDE_A0_MARK, PORT205_FN6),
PINMUX_DATA(IDE_IODACK_MARK, PORT206_FN6),
PINMUX_DATA(IDE_IODREQ_MARK, PORT207_FN6),
PINMUX_DATA(IDE_CS1_MARK, PORT208_FN6),
/* Port203 - Port208 Function7 */
PINMUX_DATA(SCIFA4_TXD_PORT203_MARK, PORT203_FN7, MSEL5CR_12_0, MSEL5CR_11_1),
PINMUX_DATA(SCIFA4_RXD_PORT204_MARK, PORT204_FN7, MSEL5CR_12_0, MSEL5CR_11_1),
PINMUX_DATA(SCIFA4_SCK_PORT205_MARK, PORT205_FN7, MSEL5CR_10_1),
PINMUX_DATA(SCIFA5_SCK_PORT206_MARK, PORT206_FN7, MSEL5CR_13_1),
PINMUX_DATA(SCIFA5_RXD_PORT207_MARK, PORT207_FN7, MSEL5CR_15_0, MSEL5CR_14_1),
PINMUX_DATA(SCIFA5_TXD_PORT208_MARK, PORT208_FN7, MSEL5CR_15_0, MSEL5CR_14_1),
/* Port209 */
PINMUX_DATA(VBUS_MARK, PORT209_FN1),
PINMUX_DATA(IRQ7_PORT209_MARK, PORT209_FN0, MSEL1CR_7_1),
/* Port210 */
PINMUX_DATA(IRQ9_PORT210_MARK, PORT210_FN0, MSEL1CR_9_1),
/* Port211 */
PINMUX_DATA(IRQ16_PORT211_MARK, PORT211_FN0, MSEL1CR_16_1),
/* LCDC select */
PINMUX_DATA(LCDC0_SELECT_MARK, MSEL3CR_6_0),
PINMUX_DATA(LCDC1_SELECT_MARK, MSEL3CR_6_1),
/* SDENC */
PINMUX_DATA(SDENC_CPG_MARK, MSEL4CR_19_0),
PINMUX_DATA(SDENC_DV_CLKI_MARK, MSEL4CR_19_1),
/* SYSC */
PINMUX_DATA(RESETP_PULLUP_MARK, MSEL4CR_4_0),
PINMUX_DATA(RESETP_PLAIN_MARK, MSEL4CR_4_1),
/* DEBUG */
PINMUX_DATA(EDEBGREQ_PULLDOWN_MARK, MSEL4CR_1_0),
PINMUX_DATA(EDEBGREQ_PULLUP_MARK, MSEL4CR_1_1),
PINMUX_DATA(TRACEAUD_FROM_VIO_MARK, MSEL5CR_30_0, MSEL5CR_29_0),
PINMUX_DATA(TRACEAUD_FROM_LCDC0_MARK, MSEL5CR_30_0, MSEL5CR_29_1),
PINMUX_DATA(TRACEAUD_FROM_MEMC_MARK, MSEL5CR_30_1, MSEL5CR_29_0),
};
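/*
 * pinmux_gpios[] exposes every port (via GPIO_PORT_ALL()) and every function
 * mark (via GPIO_FN()) to the SH pinmux/GPIO framework; board code can then
 * typically select a pin function by requesting the matching GPIO_FN_* number.
 */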
static struct pinmux_gpio pinmux_gpios[] = {
/* PORT */
GPIO_PORT_ALL(),
/* IRQ */
GPIO_FN(IRQ0_PORT2), GPIO_FN(IRQ0_PORT13),
GPIO_FN(IRQ1),
GPIO_FN(IRQ2_PORT11), GPIO_FN(IRQ2_PORT12),
GPIO_FN(IRQ3_PORT10), GPIO_FN(IRQ3_PORT14),
GPIO_FN(IRQ4_PORT15), GPIO_FN(IRQ4_PORT172),
GPIO_FN(IRQ5_PORT0), GPIO_FN(IRQ5_PORT1),
GPIO_FN(IRQ6_PORT121), GPIO_FN(IRQ6_PORT173),
GPIO_FN(IRQ7_PORT120), GPIO_FN(IRQ7_PORT209),
GPIO_FN(IRQ8),
GPIO_FN(IRQ9_PORT118), GPIO_FN(IRQ9_PORT210),
GPIO_FN(IRQ10),
GPIO_FN(IRQ11),
GPIO_FN(IRQ12_PORT42), GPIO_FN(IRQ12_PORT97),
GPIO_FN(IRQ13_PORT64), GPIO_FN(IRQ13_PORT98),
GPIO_FN(IRQ14_PORT63), GPIO_FN(IRQ14_PORT99),
GPIO_FN(IRQ15_PORT62), GPIO_FN(IRQ15_PORT100),
GPIO_FN(IRQ16_PORT68), GPIO_FN(IRQ16_PORT211),
GPIO_FN(IRQ17),
GPIO_FN(IRQ18),
GPIO_FN(IRQ19),
GPIO_FN(IRQ20),
GPIO_FN(IRQ21),
GPIO_FN(IRQ22),
GPIO_FN(IRQ23),
GPIO_FN(IRQ24),
GPIO_FN(IRQ25),
GPIO_FN(IRQ26_PORT58), GPIO_FN(IRQ26_PORT81),
GPIO_FN(IRQ27_PORT57), GPIO_FN(IRQ27_PORT168),
GPIO_FN(IRQ28_PORT56), GPIO_FN(IRQ28_PORT169),
GPIO_FN(IRQ29_PORT50), GPIO_FN(IRQ29_PORT170),
GPIO_FN(IRQ30_PORT49), GPIO_FN(IRQ30_PORT171),
GPIO_FN(IRQ31_PORT41), GPIO_FN(IRQ31_PORT167),
/* Function */
/* DBGT */
GPIO_FN(DBGMDT2), GPIO_FN(DBGMDT1), GPIO_FN(DBGMDT0),
GPIO_FN(DBGMD10), GPIO_FN(DBGMD11), GPIO_FN(DBGMD20),
GPIO_FN(DBGMD21),
/* FSI */
GPIO_FN(FSIAISLD_PORT0), /* FSIAISLD Port 0/5 */
GPIO_FN(FSIAISLD_PORT5),
GPIO_FN(FSIASPDIF_PORT9), /* FSIASPDIF Port 9/18 */
GPIO_FN(FSIASPDIF_PORT18),
GPIO_FN(FSIAOSLD1), GPIO_FN(FSIAOSLD2), GPIO_FN(FSIAOLR),
GPIO_FN(FSIAOBT), GPIO_FN(FSIAOSLD), GPIO_FN(FSIAOMC),
GPIO_FN(FSIACK), GPIO_FN(FSIAILR), GPIO_FN(FSIAIBT),
/* FMSI */
GPIO_FN(FMSISLD_PORT1), /* FMSISLD Port 1/6 */
GPIO_FN(FMSISLD_PORT6),
GPIO_FN(FMSIILR), GPIO_FN(FMSIIBT), GPIO_FN(FMSIOLR),
GPIO_FN(FMSIOBT), GPIO_FN(FMSICK), GPIO_FN(FMSOILR),
GPIO_FN(FMSOIBT), GPIO_FN(FMSOOLR), GPIO_FN(FMSOOBT),
GPIO_FN(FMSOSLD), GPIO_FN(FMSOCK),
/* SCIFA0 */
GPIO_FN(SCIFA0_SCK), GPIO_FN(SCIFA0_CTS), GPIO_FN(SCIFA0_RTS),
GPIO_FN(SCIFA0_RXD), GPIO_FN(SCIFA0_TXD),
/* SCIFA1 */
GPIO_FN(SCIFA1_CTS), GPIO_FN(SCIFA1_SCK),
GPIO_FN(SCIFA1_RXD), GPIO_FN(SCIFA1_TXD), GPIO_FN(SCIFA1_RTS),
/* SCIFA2 */
GPIO_FN(SCIFA2_SCK_PORT22), /* SCIFA2_SCK Port 22/199 */
GPIO_FN(SCIFA2_SCK_PORT199),
GPIO_FN(SCIFA2_RXD), GPIO_FN(SCIFA2_TXD),
GPIO_FN(SCIFA2_CTS), GPIO_FN(SCIFA2_RTS),
/* SCIFA3 */
GPIO_FN(SCIFA3_RTS_PORT105), /* MSEL5CR_8_0 */
GPIO_FN(SCIFA3_SCK_PORT116),
GPIO_FN(SCIFA3_CTS_PORT117),
GPIO_FN(SCIFA3_RXD_PORT174),
GPIO_FN(SCIFA3_TXD_PORT175),
GPIO_FN(SCIFA3_RTS_PORT161), /* MSEL5CR_8_1 */
GPIO_FN(SCIFA3_SCK_PORT158),
GPIO_FN(SCIFA3_CTS_PORT162),
GPIO_FN(SCIFA3_RXD_PORT159),
GPIO_FN(SCIFA3_TXD_PORT160),
/* SCIFA4 */
GPIO_FN(SCIFA4_RXD_PORT12), /* MSEL5CR[12:11] = 00 */
GPIO_FN(SCIFA4_TXD_PORT13),
GPIO_FN(SCIFA4_RXD_PORT204), /* MSEL5CR[12:11] = 01 */
GPIO_FN(SCIFA4_TXD_PORT203),
GPIO_FN(SCIFA4_RXD_PORT94), /* MSEL5CR[12:11] = 10 */
GPIO_FN(SCIFA4_TXD_PORT93),
GPIO_FN(SCIFA4_SCK_PORT21), /* SCIFA4_SCK Port 21/205 */
GPIO_FN(SCIFA4_SCK_PORT205),
/* SCIFA5 */
GPIO_FN(SCIFA5_TXD_PORT20), /* MSEL5CR[15:14] = 00 */
GPIO_FN(SCIFA5_RXD_PORT10),
GPIO_FN(SCIFA5_RXD_PORT207), /* MSEL5CR[15:14] = 01 */
GPIO_FN(SCIFA5_TXD_PORT208),
GPIO_FN(SCIFA5_TXD_PORT91), /* MSEL5CR[15:14] = 10 */
GPIO_FN(SCIFA5_RXD_PORT92),
GPIO_FN(SCIFA5_SCK_PORT23), /* SCIFA5_SCK Port 23/206 */
GPIO_FN(SCIFA5_SCK_PORT206),
/* SCIFA6 */
GPIO_FN(SCIFA6_SCK), GPIO_FN(SCIFA6_RXD), GPIO_FN(SCIFA6_TXD),
/* SCIFA7 */
GPIO_FN(SCIFA7_TXD), GPIO_FN(SCIFA7_RXD),
/* SCIFB */
GPIO_FN(SCIFB_SCK_PORT190), /* MSEL5CR_17_0 */
GPIO_FN(SCIFB_RXD_PORT191),
GPIO_FN(SCIFB_TXD_PORT192),
GPIO_FN(SCIFB_RTS_PORT186),
GPIO_FN(SCIFB_CTS_PORT187),
GPIO_FN(SCIFB_SCK_PORT2), /* MSEL5CR_17_1 */
GPIO_FN(SCIFB_RXD_PORT3),
GPIO_FN(SCIFB_TXD_PORT4),
GPIO_FN(SCIFB_RTS_PORT172),
GPIO_FN(SCIFB_CTS_PORT173),
/* LCD0 */
GPIO_FN(LCD0_D0), GPIO_FN(LCD0_D1), GPIO_FN(LCD0_D2),
GPIO_FN(LCD0_D3), GPIO_FN(LCD0_D4), GPIO_FN(LCD0_D5),
GPIO_FN(LCD0_D6), GPIO_FN(LCD0_D7), GPIO_FN(LCD0_D8),
GPIO_FN(LCD0_D9), GPIO_FN(LCD0_D10), GPIO_FN(LCD0_D11),
GPIO_FN(LCD0_D12), GPIO_FN(LCD0_D13), GPIO_FN(LCD0_D14),
GPIO_FN(LCD0_D15), GPIO_FN(LCD0_D16), GPIO_FN(LCD0_D17),
GPIO_FN(LCD0_DON), GPIO_FN(LCD0_VCPWC), GPIO_FN(LCD0_VEPWC),
GPIO_FN(LCD0_DCK), GPIO_FN(LCD0_VSYN),
GPIO_FN(LCD0_HSYN), GPIO_FN(LCD0_DISP),
GPIO_FN(LCD0_WR), GPIO_FN(LCD0_RD),
GPIO_FN(LCD0_CS), GPIO_FN(LCD0_RS),
GPIO_FN(LCD0_D18_PORT163), GPIO_FN(LCD0_D19_PORT162),
GPIO_FN(LCD0_D20_PORT161), GPIO_FN(LCD0_D21_PORT158),
GPIO_FN(LCD0_D22_PORT160), GPIO_FN(LCD0_D23_PORT159),
GPIO_FN(LCD0_LCLK_PORT165), /* MSEL5CR_6_1 */
GPIO_FN(LCD0_D18_PORT40), GPIO_FN(LCD0_D19_PORT4),
GPIO_FN(LCD0_D20_PORT3), GPIO_FN(LCD0_D21_PORT2),
GPIO_FN(LCD0_D22_PORT0), GPIO_FN(LCD0_D23_PORT1),
GPIO_FN(LCD0_LCLK_PORT102), /* MSEL5CR_6_0 */
/* LCD1 */
GPIO_FN(LCD1_D0), GPIO_FN(LCD1_D1), GPIO_FN(LCD1_D2),
GPIO_FN(LCD1_D3), GPIO_FN(LCD1_D4), GPIO_FN(LCD1_D5),
GPIO_FN(LCD1_D6), GPIO_FN(LCD1_D7), GPIO_FN(LCD1_D8),
GPIO_FN(LCD1_D9), GPIO_FN(LCD1_D10), GPIO_FN(LCD1_D11),
GPIO_FN(LCD1_D12), GPIO_FN(LCD1_D13), GPIO_FN(LCD1_D14),
GPIO_FN(LCD1_D15), GPIO_FN(LCD1_D16), GPIO_FN(LCD1_D17),
GPIO_FN(LCD1_D18), GPIO_FN(LCD1_D19), GPIO_FN(LCD1_D20),
GPIO_FN(LCD1_D21), GPIO_FN(LCD1_D22), GPIO_FN(LCD1_D23),
GPIO_FN(LCD1_RS), GPIO_FN(LCD1_RD), GPIO_FN(LCD1_CS),
GPIO_FN(LCD1_WR), GPIO_FN(LCD1_DCK), GPIO_FN(LCD1_DON),
GPIO_FN(LCD1_VCPWC), GPIO_FN(LCD1_LCLK), GPIO_FN(LCD1_HSYN),
GPIO_FN(LCD1_VSYN), GPIO_FN(LCD1_VEPWC), GPIO_FN(LCD1_DISP),
/* RSPI */
GPIO_FN(RSPI_SSL0_A), GPIO_FN(RSPI_SSL1_A), GPIO_FN(RSPI_SSL2_A),
GPIO_FN(RSPI_SSL3_A), GPIO_FN(RSPI_CK_A), GPIO_FN(RSPI_MOSI_A),
GPIO_FN(RSPI_MISO_A),
/* VIO CKO */
GPIO_FN(VIO_CKO1),
GPIO_FN(VIO_CKO2),
GPIO_FN(VIO_CKO_1),
GPIO_FN(VIO_CKO),
/* VIO0 */
GPIO_FN(VIO0_D0), GPIO_FN(VIO0_D1), GPIO_FN(VIO0_D2),
GPIO_FN(VIO0_D3), GPIO_FN(VIO0_D4), GPIO_FN(VIO0_D5),
GPIO_FN(VIO0_D6), GPIO_FN(VIO0_D7), GPIO_FN(VIO0_D8),
GPIO_FN(VIO0_D9), GPIO_FN(VIO0_D10), GPIO_FN(VIO0_D11),
GPIO_FN(VIO0_D12), GPIO_FN(VIO0_VD), GPIO_FN(VIO0_HD),
GPIO_FN(VIO0_CLK), GPIO_FN(VIO0_FIELD),
GPIO_FN(VIO0_D13_PORT26), /* MSEL5CR_27_0 */
GPIO_FN(VIO0_D14_PORT25),
GPIO_FN(VIO0_D15_PORT24),
GPIO_FN(VIO0_D13_PORT22), /* MSEL5CR_27_1 */
GPIO_FN(VIO0_D14_PORT95),
GPIO_FN(VIO0_D15_PORT96),
/* VIO1 */
GPIO_FN(VIO1_D0), GPIO_FN(VIO1_D1), GPIO_FN(VIO1_D2),
GPIO_FN(VIO1_D3), GPIO_FN(VIO1_D4), GPIO_FN(VIO1_D5),
GPIO_FN(VIO1_D6), GPIO_FN(VIO1_D7), GPIO_FN(VIO1_VD),
GPIO_FN(VIO1_HD), GPIO_FN(VIO1_CLK), GPIO_FN(VIO1_FIELD),
/* TPU0 */
GPIO_FN(TPU0TO0), GPIO_FN(TPU0TO1), GPIO_FN(TPU0TO3),
GPIO_FN(TPU0TO2_PORT66), /* TPU0TO2 Port 66/202 */
GPIO_FN(TPU0TO2_PORT202),
/* SSP1 0 */
GPIO_FN(STP0_IPD0), GPIO_FN(STP0_IPD1), GPIO_FN(STP0_IPD2),
GPIO_FN(STP0_IPD3), GPIO_FN(STP0_IPD4), GPIO_FN(STP0_IPD5),
GPIO_FN(STP0_IPD6), GPIO_FN(STP0_IPD7), GPIO_FN(STP0_IPEN),
GPIO_FN(STP0_IPCLK), GPIO_FN(STP0_IPSYNC),
/* SSP1 1 */
GPIO_FN(STP1_IPD1), GPIO_FN(STP1_IPD2), GPIO_FN(STP1_IPD3),
GPIO_FN(STP1_IPD4), GPIO_FN(STP1_IPD5), GPIO_FN(STP1_IPD6),
GPIO_FN(STP1_IPD7), GPIO_FN(STP1_IPCLK), GPIO_FN(STP1_IPSYNC),
GPIO_FN(STP1_IPD0_PORT186), /* MSEL5CR_23_0 */
GPIO_FN(STP1_IPEN_PORT187),
GPIO_FN(STP1_IPD0_PORT194), /* MSEL5CR_23_1 */
GPIO_FN(STP1_IPEN_PORT193),
/* SIM */
GPIO_FN(SIM_RST), GPIO_FN(SIM_CLK),
GPIO_FN(SIM_D_PORT22), /* SIM_D Port 22/199 */
GPIO_FN(SIM_D_PORT199),
/* SDHI0 */
GPIO_FN(SDHI0_D0), GPIO_FN(SDHI0_D1), GPIO_FN(SDHI0_D2),
GPIO_FN(SDHI0_D3), GPIO_FN(SDHI0_CD), GPIO_FN(SDHI0_WP),
GPIO_FN(SDHI0_CMD), GPIO_FN(SDHI0_CLK),
/* SDHI1 */
GPIO_FN(SDHI1_D0), GPIO_FN(SDHI1_D1), GPIO_FN(SDHI1_D2),
GPIO_FN(SDHI1_D3), GPIO_FN(SDHI1_CD), GPIO_FN(SDHI1_WP),
GPIO_FN(SDHI1_CMD), GPIO_FN(SDHI1_CLK),
/* SDHI2 */
GPIO_FN(SDHI2_D0), GPIO_FN(SDHI2_D1), GPIO_FN(SDHI2_D2),
GPIO_FN(SDHI2_D3), GPIO_FN(SDHI2_CLK), GPIO_FN(SDHI2_CMD),
GPIO_FN(SDHI2_CD_PORT24), /* MSEL5CR_19_0 */
GPIO_FN(SDHI2_WP_PORT25),
GPIO_FN(SDHI2_WP_PORT177), /* MSEL5CR_19_1 */
GPIO_FN(SDHI2_CD_PORT202),
/* MSIOF2 */
GPIO_FN(MSIOF2_TXD), GPIO_FN(MSIOF2_RXD), GPIO_FN(MSIOF2_TSCK),
GPIO_FN(MSIOF2_SS2), GPIO_FN(MSIOF2_TSYNC), GPIO_FN(MSIOF2_SS1),
GPIO_FN(MSIOF2_MCK1), GPIO_FN(MSIOF2_MCK0), GPIO_FN(MSIOF2_RSYNC),
GPIO_FN(MSIOF2_RSCK),
/* KEYSC */
GPIO_FN(KEYIN4), GPIO_FN(KEYIN5),
GPIO_FN(KEYIN6), GPIO_FN(KEYIN7),
GPIO_FN(KEYOUT0), GPIO_FN(KEYOUT1), GPIO_FN(KEYOUT2),
GPIO_FN(KEYOUT3), GPIO_FN(KEYOUT4), GPIO_FN(KEYOUT5),
GPIO_FN(KEYOUT6), GPIO_FN(KEYOUT7),
GPIO_FN(KEYIN0_PORT43), /* MSEL4CR_18_0 */
GPIO_FN(KEYIN1_PORT44),
GPIO_FN(KEYIN2_PORT45),
GPIO_FN(KEYIN3_PORT46),
GPIO_FN(KEYIN0_PORT58), /* MSEL4CR_18_1 */
GPIO_FN(KEYIN1_PORT57),
GPIO_FN(KEYIN2_PORT56),
GPIO_FN(KEYIN3_PORT55),
/* VOU */
GPIO_FN(DV_D0), GPIO_FN(DV_D1), GPIO_FN(DV_D2),
GPIO_FN(DV_D3), GPIO_FN(DV_D4), GPIO_FN(DV_D5),
GPIO_FN(DV_D6), GPIO_FN(DV_D7), GPIO_FN(DV_D8),
GPIO_FN(DV_D9), GPIO_FN(DV_D10), GPIO_FN(DV_D11),
GPIO_FN(DV_D12), GPIO_FN(DV_D13), GPIO_FN(DV_D14),
GPIO_FN(DV_D15), GPIO_FN(DV_CLK),
GPIO_FN(DV_VSYNC), GPIO_FN(DV_HSYNC),
/* MEMC */
GPIO_FN(MEMC_AD0), GPIO_FN(MEMC_AD1), GPIO_FN(MEMC_AD2),
GPIO_FN(MEMC_AD3), GPIO_FN(MEMC_AD4), GPIO_FN(MEMC_AD5),
GPIO_FN(MEMC_AD6), GPIO_FN(MEMC_AD7), GPIO_FN(MEMC_AD8),
GPIO_FN(MEMC_AD9), GPIO_FN(MEMC_AD10), GPIO_FN(MEMC_AD11),
GPIO_FN(MEMC_AD12), GPIO_FN(MEMC_AD13), GPIO_FN(MEMC_AD14),
GPIO_FN(MEMC_AD15), GPIO_FN(MEMC_CS0), GPIO_FN(MEMC_INT),
GPIO_FN(MEMC_NWE), GPIO_FN(MEMC_NOE), GPIO_FN(MEMC_CS1),
GPIO_FN(MEMC_A1), GPIO_FN(MEMC_ADV), GPIO_FN(MEMC_DREQ0),
GPIO_FN(MEMC_WAIT), GPIO_FN(MEMC_DREQ1), GPIO_FN(MEMC_BUSCLK),
GPIO_FN(MEMC_A0),
/* MMC */
GPIO_FN(MMC0_D0_PORT68), GPIO_FN(MMC0_D1_PORT69),
GPIO_FN(MMC0_D2_PORT70), GPIO_FN(MMC0_D3_PORT71),
GPIO_FN(MMC0_D4_PORT72), GPIO_FN(MMC0_D5_PORT73),
GPIO_FN(MMC0_D6_PORT74), GPIO_FN(MMC0_D7_PORT75),
GPIO_FN(MMC0_CLK_PORT66),
GPIO_FN(MMC0_CMD_PORT67), /* MSEL4CR_15_0 */
GPIO_FN(MMC1_D0_PORT149), GPIO_FN(MMC1_D1_PORT148),
GPIO_FN(MMC1_D2_PORT147), GPIO_FN(MMC1_D3_PORT146),
GPIO_FN(MMC1_D4_PORT145), GPIO_FN(MMC1_D5_PORT144),
GPIO_FN(MMC1_D6_PORT143), GPIO_FN(MMC1_D7_PORT142),
GPIO_FN(MMC1_CLK_PORT103),
GPIO_FN(MMC1_CMD_PORT104), /* MSEL4CR_15_1 */
/* MSIOF0 */
GPIO_FN(MSIOF0_SS1), GPIO_FN(MSIOF0_SS2), GPIO_FN(MSIOF0_RXD),
GPIO_FN(MSIOF0_TXD), GPIO_FN(MSIOF0_MCK0), GPIO_FN(MSIOF0_MCK1),
GPIO_FN(MSIOF0_RSYNC), GPIO_FN(MSIOF0_RSCK), GPIO_FN(MSIOF0_TSCK),
GPIO_FN(MSIOF0_TSYNC),
/* MSIOF1 */
GPIO_FN(MSIOF1_RSCK), GPIO_FN(MSIOF1_RSYNC),
GPIO_FN(MSIOF1_MCK0), GPIO_FN(MSIOF1_MCK1),
GPIO_FN(MSIOF1_SS2_PORT116), GPIO_FN(MSIOF1_SS1_PORT117),
GPIO_FN(MSIOF1_RXD_PORT118), GPIO_FN(MSIOF1_TXD_PORT119),
GPIO_FN(MSIOF1_TSYNC_PORT120),
GPIO_FN(MSIOF1_TSCK_PORT121), /* MSEL4CR_10_0 */
GPIO_FN(MSIOF1_SS1_PORT67), GPIO_FN(MSIOF1_TSCK_PORT72),
GPIO_FN(MSIOF1_TSYNC_PORT73), GPIO_FN(MSIOF1_TXD_PORT74),
GPIO_FN(MSIOF1_RXD_PORT75),
GPIO_FN(MSIOF1_SS2_PORT202), /* MSEL4CR_10_1 */
/* GPIO */
GPIO_FN(GPO0), GPIO_FN(GPI0),
GPIO_FN(GPO1), GPIO_FN(GPI1),
/* USB0 */
GPIO_FN(USB0_OCI), GPIO_FN(USB0_PPON), GPIO_FN(VBUS),
/* USB1 */
GPIO_FN(USB1_OCI), GPIO_FN(USB1_PPON),
/* BBIF1 */
GPIO_FN(BBIF1_RXD), GPIO_FN(BBIF1_TXD), GPIO_FN(BBIF1_TSYNC),
GPIO_FN(BBIF1_TSCK), GPIO_FN(BBIF1_RSCK), GPIO_FN(BBIF1_RSYNC),
GPIO_FN(BBIF1_FLOW), GPIO_FN(BBIF1_RX_FLOW_N),
/* BBIF2 */
GPIO_FN(BBIF2_TXD2_PORT5), /* MSEL5CR_0_0 */
GPIO_FN(BBIF2_RXD2_PORT60),
GPIO_FN(BBIF2_TSYNC2_PORT6),
GPIO_FN(BBIF2_TSCK2_PORT59),
GPIO_FN(BBIF2_RXD2_PORT90), /* MSEL5CR_0_1 */
GPIO_FN(BBIF2_TXD2_PORT183),
GPIO_FN(BBIF2_TSCK2_PORT89),
GPIO_FN(BBIF2_TSYNC2_PORT184),
/* BSC / FLCTL / PCMCIA */
GPIO_FN(CS0), GPIO_FN(CS2), GPIO_FN(CS4),
GPIO_FN(CS5B), GPIO_FN(CS6A),
GPIO_FN(CS5A_PORT105), /* CS5A PORT 19/105 */
GPIO_FN(CS5A_PORT19),
GPIO_FN(IOIS16), /* ? */
GPIO_FN(A0), GPIO_FN(A1), GPIO_FN(A2), GPIO_FN(A3),
GPIO_FN(A4_FOE), GPIO_FN(A5_FCDE), /* share with FLCTL */
GPIO_FN(A6), GPIO_FN(A7), GPIO_FN(A8), GPIO_FN(A9),
GPIO_FN(A10), GPIO_FN(A11), GPIO_FN(A12), GPIO_FN(A13),
GPIO_FN(A14), GPIO_FN(A15), GPIO_FN(A16), GPIO_FN(A17),
GPIO_FN(A18), GPIO_FN(A19), GPIO_FN(A20), GPIO_FN(A21),
GPIO_FN(A22), GPIO_FN(A23), GPIO_FN(A24), GPIO_FN(A25),
GPIO_FN(A26),
GPIO_FN(D0_NAF0), GPIO_FN(D1_NAF1), /* share with FLCTL */
GPIO_FN(D2_NAF2), GPIO_FN(D3_NAF3), /* share with FLCTL */
GPIO_FN(D4_NAF4), GPIO_FN(D5_NAF5), /* share with FLCTL */
GPIO_FN(D6_NAF6), GPIO_FN(D7_NAF7), /* share with FLCTL */
GPIO_FN(D8_NAF8), GPIO_FN(D9_NAF9), /* share with FLCTL */
GPIO_FN(D10_NAF10), GPIO_FN(D11_NAF11), /* share with FLCTL */
GPIO_FN(D12_NAF12), GPIO_FN(D13_NAF13), /* share with FLCTL */
GPIO_FN(D14_NAF14), GPIO_FN(D15_NAF15), /* share with FLCTL */
GPIO_FN(D16), GPIO_FN(D17), GPIO_FN(D18), GPIO_FN(D19),
GPIO_FN(D20), GPIO_FN(D21), GPIO_FN(D22), GPIO_FN(D23),
GPIO_FN(D24), GPIO_FN(D25), GPIO_FN(D26), GPIO_FN(D27),
GPIO_FN(D28), GPIO_FN(D29), GPIO_FN(D30), GPIO_FN(D31),
GPIO_FN(WE0_FWE), /* share with FLCTL */
GPIO_FN(WE1),
GPIO_FN(WE2_ICIORD), /* share with PCMCIA */
GPIO_FN(WE3_ICIOWR), /* share with PCMCIA */
GPIO_FN(CKO), GPIO_FN(BS), GPIO_FN(RDWR),
GPIO_FN(RD_FSC), /* share with FLCTL */
GPIO_FN(WAIT_PORT177), /* WAIT Port 90/177 */
GPIO_FN(WAIT_PORT90),
GPIO_FN(FCE0), GPIO_FN(FCE1), GPIO_FN(FRB), /* FLCTL */
/* IRDA */
GPIO_FN(IRDA_FIRSEL), GPIO_FN(IRDA_IN), GPIO_FN(IRDA_OUT),
/* ATAPI */
GPIO_FN(IDE_D0), GPIO_FN(IDE_D1), GPIO_FN(IDE_D2),
GPIO_FN(IDE_D3), GPIO_FN(IDE_D4), GPIO_FN(IDE_D5),
GPIO_FN(IDE_D6), GPIO_FN(IDE_D7), GPIO_FN(IDE_D8),
GPIO_FN(IDE_D9), GPIO_FN(IDE_D10), GPIO_FN(IDE_D11),
GPIO_FN(IDE_D12), GPIO_FN(IDE_D13), GPIO_FN(IDE_D14),
GPIO_FN(IDE_D15), GPIO_FN(IDE_A0), GPIO_FN(IDE_A1),
GPIO_FN(IDE_A2), GPIO_FN(IDE_CS0), GPIO_FN(IDE_CS1),
GPIO_FN(IDE_IOWR), GPIO_FN(IDE_IORD), GPIO_FN(IDE_IORDY),
GPIO_FN(IDE_INT), GPIO_FN(IDE_RST), GPIO_FN(IDE_DIRECTION),
GPIO_FN(IDE_EXBUF_ENB), GPIO_FN(IDE_IODACK), GPIO_FN(IDE_IODREQ),
/* RMII */
GPIO_FN(RMII_CRS_DV), GPIO_FN(RMII_RX_ER), GPIO_FN(RMII_RXD0),
GPIO_FN(RMII_RXD1), GPIO_FN(RMII_TX_EN), GPIO_FN(RMII_TXD0),
GPIO_FN(RMII_MDC), GPIO_FN(RMII_TXD1), GPIO_FN(RMII_MDIO),
GPIO_FN(RMII_REF50CK), GPIO_FN(RMII_REF125CK), /* for GMII */
/* GEther */
GPIO_FN(ET_TX_CLK), GPIO_FN(ET_TX_EN), GPIO_FN(ET_ETXD0),
GPIO_FN(ET_ETXD1), GPIO_FN(ET_ETXD2), GPIO_FN(ET_ETXD3),
GPIO_FN(ET_ETXD4), GPIO_FN(ET_ETXD5), /* for GEther */
GPIO_FN(ET_ETXD6), GPIO_FN(ET_ETXD7), /* for GEther */
GPIO_FN(ET_COL), GPIO_FN(ET_TX_ER), GPIO_FN(ET_RX_CLK),
GPIO_FN(ET_RX_DV), GPIO_FN(ET_ERXD0), GPIO_FN(ET_ERXD1),
GPIO_FN(ET_ERXD2), GPIO_FN(ET_ERXD3),
GPIO_FN(ET_ERXD4), GPIO_FN(ET_ERXD5), /* for GEther */
GPIO_FN(ET_ERXD6), GPIO_FN(ET_ERXD7), /* for GEther */
GPIO_FN(ET_RX_ER), GPIO_FN(ET_CRS), GPIO_FN(ET_MDC),
GPIO_FN(ET_MDIO), GPIO_FN(ET_LINK), GPIO_FN(ET_PHY_INT),
GPIO_FN(ET_WOL), GPIO_FN(ET_GTX_CLK),
/* DMA0 */
GPIO_FN(DREQ0), GPIO_FN(DACK0),
/* DMA1 */
GPIO_FN(DREQ1), GPIO_FN(DACK1),
/* SYSC */
GPIO_FN(RESETOUTS),
/* IRREM */
GPIO_FN(IROUT),
/* LCDC */
GPIO_FN(LCDC0_SELECT),
GPIO_FN(LCDC1_SELECT),
/* SDENC */
GPIO_FN(SDENC_CPG),
GPIO_FN(SDENC_DV_CLKI),
/* SYSC */
GPIO_FN(RESETP_PULLUP),
GPIO_FN(RESETP_PLAIN),
/* DEBUG */
GPIO_FN(EDEBGREQ_PULLDOWN),
GPIO_FN(EDEBGREQ_PULLUP),
GPIO_FN(TRACEAUD_FROM_VIO),
GPIO_FN(TRACEAUD_FROM_LCDC0),
GPIO_FN(TRACEAUD_FROM_MEMC),
};
static struct pinmux_cfg_reg pinmux_config_regs[] = {
PORTCR(0, 0xe6050000), /* PORT0CR */
PORTCR(1, 0xe6050001), /* PORT1CR */
PORTCR(2, 0xe6050002), /* PORT2CR */
PORTCR(3, 0xe6050003), /* PORT3CR */
PORTCR(4, 0xe6050004), /* PORT4CR */
PORTCR(5, 0xe6050005), /* PORT5CR */
PORTCR(6, 0xe6050006), /* PORT6CR */
PORTCR(7, 0xe6050007), /* PORT7CR */
PORTCR(8, 0xe6050008), /* PORT8CR */
PORTCR(9, 0xe6050009), /* PORT9CR */
PORTCR(10, 0xe605000a), /* PORT10CR */
PORTCR(11, 0xe605000b), /* PORT11CR */
PORTCR(12, 0xe605000c), /* PORT12CR */
PORTCR(13, 0xe605000d), /* PORT13CR */
PORTCR(14, 0xe605000e), /* PORT14CR */
PORTCR(15, 0xe605000f), /* PORT15CR */
PORTCR(16, 0xe6050010), /* PORT16CR */
PORTCR(17, 0xe6050011), /* PORT17CR */
PORTCR(18, 0xe6050012), /* PORT18CR */
PORTCR(19, 0xe6050013), /* PORT19CR */
PORTCR(20, 0xe6050014), /* PORT20CR */
PORTCR(21, 0xe6050015), /* PORT21CR */
PORTCR(22, 0xe6050016), /* PORT22CR */
PORTCR(23, 0xe6050017), /* PORT23CR */
PORTCR(24, 0xe6050018), /* PORT24CR */
PORTCR(25, 0xe6050019), /* PORT25CR */
PORTCR(26, 0xe605001a), /* PORT26CR */
PORTCR(27, 0xe605001b), /* PORT27CR */
PORTCR(28, 0xe605001c), /* PORT28CR */
PORTCR(29, 0xe605001d), /* PORT29CR */
PORTCR(30, 0xe605001e), /* PORT30CR */
PORTCR(31, 0xe605001f), /* PORT31CR */
PORTCR(32, 0xe6050020), /* PORT32CR */
PORTCR(33, 0xe6050021), /* PORT33CR */
PORTCR(34, 0xe6050022), /* PORT34CR */
PORTCR(35, 0xe6050023), /* PORT35CR */
PORTCR(36, 0xe6050024), /* PORT36CR */
PORTCR(37, 0xe6050025), /* PORT37CR */
PORTCR(38, 0xe6050026), /* PORT38CR */
PORTCR(39, 0xe6050027), /* PORT39CR */
PORTCR(40, 0xe6050028), /* PORT40CR */
PORTCR(41, 0xe6050029), /* PORT41CR */
PORTCR(42, 0xe605002a), /* PORT42CR */
PORTCR(43, 0xe605002b), /* PORT43CR */
PORTCR(44, 0xe605002c), /* PORT44CR */
PORTCR(45, 0xe605002d), /* PORT45CR */
PORTCR(46, 0xe605002e), /* PORT46CR */
PORTCR(47, 0xe605002f), /* PORT47CR */
PORTCR(48, 0xe6050030), /* PORT48CR */
PORTCR(49, 0xe6050031), /* PORT49CR */
PORTCR(50, 0xe6050032), /* PORT50CR */
PORTCR(51, 0xe6050033), /* PORT51CR */
PORTCR(52, 0xe6050034), /* PORT52CR */
PORTCR(53, 0xe6050035), /* PORT53CR */
PORTCR(54, 0xe6050036), /* PORT54CR */
PORTCR(55, 0xe6050037), /* PORT55CR */
PORTCR(56, 0xe6050038), /* PORT56CR */
PORTCR(57, 0xe6050039), /* PORT57CR */
PORTCR(58, 0xe605003a), /* PORT58CR */
PORTCR(59, 0xe605003b), /* PORT59CR */
PORTCR(60, 0xe605003c), /* PORT60CR */
PORTCR(61, 0xe605003d), /* PORT61CR */
PORTCR(62, 0xe605003e), /* PORT62CR */
PORTCR(63, 0xe605003f), /* PORT63CR */
PORTCR(64, 0xe6050040), /* PORT64CR */
PORTCR(65, 0xe6050041), /* PORT65CR */
PORTCR(66, 0xe6050042), /* PORT66CR */
PORTCR(67, 0xe6050043), /* PORT67CR */
PORTCR(68, 0xe6050044), /* PORT68CR */
PORTCR(69, 0xe6050045), /* PORT69CR */
PORTCR(70, 0xe6050046), /* PORT70CR */
PORTCR(71, 0xe6050047), /* PORT71CR */
PORTCR(72, 0xe6050048), /* PORT72CR */
PORTCR(73, 0xe6050049), /* PORT73CR */
PORTCR(74, 0xe605004a), /* PORT74CR */
PORTCR(75, 0xe605004b), /* PORT75CR */
PORTCR(76, 0xe605004c), /* PORT76CR */
PORTCR(77, 0xe605004d), /* PORT77CR */
PORTCR(78, 0xe605004e), /* PORT78CR */
PORTCR(79, 0xe605004f), /* PORT79CR */
PORTCR(80, 0xe6050050), /* PORT80CR */
PORTCR(81, 0xe6050051), /* PORT81CR */
PORTCR(82, 0xe6050052), /* PORT82CR */
PORTCR(83, 0xe6050053), /* PORT83CR */
PORTCR(84, 0xe6051054), /* PORT84CR */
PORTCR(85, 0xe6051055), /* PORT85CR */
PORTCR(86, 0xe6051056), /* PORT86CR */
PORTCR(87, 0xe6051057), /* PORT87CR */
PORTCR(88, 0xe6051058), /* PORT88CR */
PORTCR(89, 0xe6051059), /* PORT89CR */
PORTCR(90, 0xe605105a), /* PORT90CR */
PORTCR(91, 0xe605105b), /* PORT91CR */
PORTCR(92, 0xe605105c), /* PORT92CR */
PORTCR(93, 0xe605105d), /* PORT93CR */
PORTCR(94, 0xe605105e), /* PORT94CR */
PORTCR(95, 0xe605105f), /* PORT95CR */
PORTCR(96, 0xe6051060), /* PORT96CR */
PORTCR(97, 0xe6051061), /* PORT97CR */
PORTCR(98, 0xe6051062), /* PORT98CR */
PORTCR(99, 0xe6051063), /* PORT99CR */
PORTCR(100, 0xe6051064), /* PORT100CR */
PORTCR(101, 0xe6051065), /* PORT101CR */
PORTCR(102, 0xe6051066), /* PORT102CR */
PORTCR(103, 0xe6051067), /* PORT103CR */
PORTCR(104, 0xe6051068), /* PORT104CR */
PORTCR(105, 0xe6051069), /* PORT105CR */
PORTCR(106, 0xe605106a), /* PORT106CR */
PORTCR(107, 0xe605106b), /* PORT107CR */
PORTCR(108, 0xe605106c), /* PORT108CR */
PORTCR(109, 0xe605106d), /* PORT109CR */
PORTCR(110, 0xe605106e), /* PORT110CR */
PORTCR(111, 0xe605106f), /* PORT111CR */
PORTCR(112, 0xe6051070), /* PORT112CR */
PORTCR(113, 0xe6051071), /* PORT113CR */
PORTCR(114, 0xe6051072), /* PORT114CR */
PORTCR(115, 0xe6052073), /* PORT115CR */
PORTCR(116, 0xe6052074), /* PORT116CR */
PORTCR(117, 0xe6052075), /* PORT117CR */
PORTCR(118, 0xe6052076), /* PORT118CR */
PORTCR(119, 0xe6052077), /* PORT119CR */
PORTCR(120, 0xe6052078), /* PORT120CR */
PORTCR(121, 0xe6052079), /* PORT121CR */
PORTCR(122, 0xe605207a), /* PORT122CR */
PORTCR(123, 0xe605207b), /* PORT123CR */
PORTCR(124, 0xe605207c), /* PORT124CR */
PORTCR(125, 0xe605207d), /* PORT125CR */
PORTCR(126, 0xe605207e), /* PORT126CR */
PORTCR(127, 0xe605207f), /* PORT127CR */
PORTCR(128, 0xe6052080), /* PORT128CR */
PORTCR(129, 0xe6052081), /* PORT129CR */
PORTCR(130, 0xe6052082), /* PORT130CR */
PORTCR(131, 0xe6052083), /* PORT131CR */
PORTCR(132, 0xe6052084), /* PORT132CR */
PORTCR(133, 0xe6052085), /* PORT133CR */
PORTCR(134, 0xe6052086), /* PORT134CR */
PORTCR(135, 0xe6052087), /* PORT135CR */
PORTCR(136, 0xe6052088), /* PORT136CR */
PORTCR(137, 0xe6052089), /* PORT137CR */
PORTCR(138, 0xe605208a), /* PORT138CR */
PORTCR(139, 0xe605208b), /* PORT139CR */
PORTCR(140, 0xe605208c), /* PORT140CR */
PORTCR(141, 0xe605208d), /* PORT141CR */
PORTCR(142, 0xe605208e), /* PORT142CR */
PORTCR(143, 0xe605208f), /* PORT143CR */
PORTCR(144, 0xe6052090), /* PORT144CR */
PORTCR(145, 0xe6052091), /* PORT145CR */
PORTCR(146, 0xe6052092), /* PORT146CR */
PORTCR(147, 0xe6052093), /* PORT147CR */
PORTCR(148, 0xe6052094), /* PORT148CR */
PORTCR(149, 0xe6052095), /* PORT149CR */
PORTCR(150, 0xe6052096), /* PORT150CR */
PORTCR(151, 0xe6052097), /* PORT151CR */
PORTCR(152, 0xe6052098), /* PORT152CR */
PORTCR(153, 0xe6052099), /* PORT153CR */
PORTCR(154, 0xe605209a), /* PORT154CR */
PORTCR(155, 0xe605209b), /* PORT155CR */
PORTCR(156, 0xe605209c), /* PORT156CR */
PORTCR(157, 0xe605209d), /* PORT157CR */
PORTCR(158, 0xe605209e), /* PORT158CR */
PORTCR(159, 0xe605209f), /* PORT159CR */
PORTCR(160, 0xe60520a0), /* PORT160CR */
PORTCR(161, 0xe60520a1), /* PORT161CR */
PORTCR(162, 0xe60520a2), /* PORT162CR */
PORTCR(163, 0xe60520a3), /* PORT163CR */
PORTCR(164, 0xe60520a4), /* PORT164CR */
PORTCR(165, 0xe60520a5), /* PORT165CR */
PORTCR(166, 0xe60520a6), /* PORT166CR */
PORTCR(167, 0xe60520a7), /* PORT167CR */
PORTCR(168, 0xe60520a8), /* PORT168CR */
PORTCR(169, 0xe60520a9), /* PORT169CR */
PORTCR(170, 0xe60520aa), /* PORT170CR */
PORTCR(171, 0xe60520ab), /* PORT171CR */
PORTCR(172, 0xe60520ac), /* PORT172CR */
PORTCR(173, 0xe60520ad), /* PORT173CR */
PORTCR(174, 0xe60520ae), /* PORT174CR */
PORTCR(175, 0xe60520af), /* PORT175CR */
PORTCR(176, 0xe60520b0), /* PORT176CR */
PORTCR(177, 0xe60520b1), /* PORT177CR */
PORTCR(178, 0xe60520b2), /* PORT178CR */
PORTCR(179, 0xe60520b3), /* PORT179CR */
PORTCR(180, 0xe60520b4), /* PORT180CR */
PORTCR(181, 0xe60520b5), /* PORT181CR */
PORTCR(182, 0xe60520b6), /* PORT182CR */
PORTCR(183, 0xe60520b7), /* PORT183CR */
PORTCR(184, 0xe60520b8), /* PORT184CR */
PORTCR(185, 0xe60520b9), /* PORT185CR */
PORTCR(186, 0xe60520ba), /* PORT186CR */
PORTCR(187, 0xe60520bb), /* PORT187CR */
PORTCR(188, 0xe60520bc), /* PORT188CR */
PORTCR(189, 0xe60520bd), /* PORT189CR */
PORTCR(190, 0xe60520be), /* PORT190CR */
PORTCR(191, 0xe60520bf), /* PORT191CR */
PORTCR(192, 0xe60520c0), /* PORT192CR */
PORTCR(193, 0xe60520c1), /* PORT193CR */
PORTCR(194, 0xe60520c2), /* PORT194CR */
PORTCR(195, 0xe60520c3), /* PORT195CR */
PORTCR(196, 0xe60520c4), /* PORT196CR */
PORTCR(197, 0xe60520c5), /* PORT197CR */
PORTCR(198, 0xe60520c6), /* PORT198CR */
PORTCR(199, 0xe60520c7), /* PORT199CR */
PORTCR(200, 0xe60520c8), /* PORT200CR */
PORTCR(201, 0xe60520c9), /* PORT201CR */
PORTCR(202, 0xe60520ca), /* PORT202CR */
PORTCR(203, 0xe60520cb), /* PORT203CR */
PORTCR(204, 0xe60520cc), /* PORT204CR */
PORTCR(205, 0xe60520cd), /* PORT205CR */
PORTCR(206, 0xe60520ce), /* PORT206CR */
PORTCR(207, 0xe60520cf), /* PORT207CR */
PORTCR(208, 0xe60520d0), /* PORT208CR */
PORTCR(209, 0xe60520d1), /* PORT209CR */
PORTCR(210, 0xe60530d2), /* PORT210CR */
PORTCR(211, 0xe60530d3), /* PORT211CR */
{ PINMUX_CFG_REG("MSEL1CR", 0xe605800c, 32, 1) {
MSEL1CR_31_0, MSEL1CR_31_1,
MSEL1CR_30_0, MSEL1CR_30_1,
MSEL1CR_29_0, MSEL1CR_29_1,
MSEL1CR_28_0, MSEL1CR_28_1,
MSEL1CR_27_0, MSEL1CR_27_1,
MSEL1CR_26_0, MSEL1CR_26_1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
MSEL1CR_16_0, MSEL1CR_16_1,
MSEL1CR_15_0, MSEL1CR_15_1,
MSEL1CR_14_0, MSEL1CR_14_1,
MSEL1CR_13_0, MSEL1CR_13_1,
MSEL1CR_12_0, MSEL1CR_12_1,
0, 0, 0, 0,
MSEL1CR_9_0, MSEL1CR_9_1,
0, 0,
MSEL1CR_7_0, MSEL1CR_7_1,
MSEL1CR_6_0, MSEL1CR_6_1,
MSEL1CR_5_0, MSEL1CR_5_1,
MSEL1CR_4_0, MSEL1CR_4_1,
MSEL1CR_3_0, MSEL1CR_3_1,
MSEL1CR_2_0, MSEL1CR_2_1,
0, 0,
MSEL1CR_0_0, MSEL1CR_0_1,
}
},
{ PINMUX_CFG_REG("MSEL3CR", 0xE6058020, 32, 1) {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
MSEL3CR_15_0, MSEL3CR_15_1,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
MSEL3CR_6_0, MSEL3CR_6_1,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
}
},
{ PINMUX_CFG_REG("MSEL4CR", 0xE6058024, 32, 1) {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
MSEL4CR_19_0, MSEL4CR_19_1,
MSEL4CR_18_0, MSEL4CR_18_1,
0, 0, 0, 0,
MSEL4CR_15_0, MSEL4CR_15_1,
0, 0, 0, 0, 0, 0, 0, 0,
MSEL4CR_10_0, MSEL4CR_10_1,
0, 0, 0, 0, 0, 0,
MSEL4CR_6_0, MSEL4CR_6_1,
0, 0,
MSEL4CR_4_0, MSEL4CR_4_1,
0, 0, 0, 0,
MSEL4CR_1_0, MSEL4CR_1_1,
0, 0,
}
},
{ PINMUX_CFG_REG("MSEL5CR", 0xE6058028, 32, 1) {
MSEL5CR_31_0, MSEL5CR_31_1,
MSEL5CR_30_0, MSEL5CR_30_1,
MSEL5CR_29_0, MSEL5CR_29_1,
0, 0,
MSEL5CR_27_0, MSEL5CR_27_1,
0, 0,
MSEL5CR_25_0, MSEL5CR_25_1,
0, 0,
MSEL5CR_23_0, MSEL5CR_23_1,
0, 0,
MSEL5CR_21_0, MSEL5CR_21_1,
0, 0,
MSEL5CR_19_0, MSEL5CR_19_1,
0, 0,
MSEL5CR_17_0, MSEL5CR_17_1,
0, 0,
MSEL5CR_15_0, MSEL5CR_15_1,
MSEL5CR_14_0, MSEL5CR_14_1,
MSEL5CR_13_0, MSEL5CR_13_1,
MSEL5CR_12_0, MSEL5CR_12_1,
MSEL5CR_11_0, MSEL5CR_11_1,
MSEL5CR_10_0, MSEL5CR_10_1,
0, 0,
MSEL5CR_8_0, MSEL5CR_8_1,
MSEL5CR_7_0, MSEL5CR_7_1,
MSEL5CR_6_0, MSEL5CR_6_1,
MSEL5CR_5_0, MSEL5CR_5_1,
MSEL5CR_4_0, MSEL5CR_4_1,
MSEL5CR_3_0, MSEL5CR_3_1,
MSEL5CR_2_0, MSEL5CR_2_1,
0, 0,
MSEL5CR_0_0, MSEL5CR_0_1,
}
},
{ },
};
static struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PORTL031_000DR", 0xe6054800, 32) {
PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA,
PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA,
PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA }
},
{ PINMUX_DATA_REG("PORTL063_032DR", 0xe6054804, 32) {
PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA,
PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA,
PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA,
PORT51_DATA, PORT50_DATA, PORT49_DATA, PORT48_DATA,
PORT47_DATA, PORT46_DATA, PORT45_DATA, PORT44_DATA,
PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA,
PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA }
},
{ PINMUX_DATA_REG("PORTL095_064DR", 0xe6054808, 32) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA,
PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA,
PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA }
},
{ PINMUX_DATA_REG("PORTD095_064DR", 0xe6055808, 32) {
PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA,
PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA,
PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0 }
},
{ PINMUX_DATA_REG("PORTD127_096DR", 0xe605580c, 32) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, PORT114_DATA, PORT113_DATA, PORT112_DATA,
PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA,
PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA }
},
{ PINMUX_DATA_REG("PORTR127_096DR", 0xe605680C, 32) {
PORT127_DATA, PORT126_DATA, PORT125_DATA, PORT124_DATA,
PORT123_DATA, PORT122_DATA, PORT121_DATA, PORT120_DATA,
PORT119_DATA, PORT118_DATA, PORT117_DATA, PORT116_DATA,
PORT115_DATA, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0 }
},
{ PINMUX_DATA_REG("PORTR159_128DR", 0xe6056810, 32) {
PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA,
PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA,
PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA,
PORT147_DATA, PORT146_DATA, PORT145_DATA, PORT144_DATA,
PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA,
PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA,
PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA,
PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA }
},
{ PINMUX_DATA_REG("PORTR191_160DR", 0xe6056814, 32) {
PORT191_DATA, PORT190_DATA, PORT189_DATA, PORT188_DATA,
PORT187_DATA, PORT186_DATA, PORT185_DATA, PORT184_DATA,
PORT183_DATA, PORT182_DATA, PORT181_DATA, PORT180_DATA,
PORT179_DATA, PORT178_DATA, PORT177_DATA, PORT176_DATA,
PORT175_DATA, PORT174_DATA, PORT173_DATA, PORT172_DATA,
PORT171_DATA, PORT170_DATA, PORT169_DATA, PORT168_DATA,
PORT167_DATA, PORT166_DATA, PORT165_DATA, PORT164_DATA,
PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA }
},
{ PINMUX_DATA_REG("PORTR223_192DR", 0xe6056818, 32) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, PORT209_DATA, PORT208_DATA,
PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA,
PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA,
PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA,
PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA }
},
{ PINMUX_DATA_REG("PORTU223_192DR", 0xe6057818, 32) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PORT211_DATA, PORT210_DATA, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0 }
},
{ },
};
static struct pinmux_info r8a7740_pinmux_info = {
.name = "r8a7740_pfc",
.reserved_id = PINMUX_RESERVED,
.data = { PINMUX_DATA_BEGIN,
PINMUX_DATA_END },
.input = { PINMUX_INPUT_BEGIN,
PINMUX_INPUT_END },
.input_pu = { PINMUX_INPUT_PULLUP_BEGIN,
PINMUX_INPUT_PULLUP_END },
.input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN,
PINMUX_INPUT_PULLDOWN_END },
.output = { PINMUX_OUTPUT_BEGIN,
PINMUX_OUTPUT_END },
.mark = { PINMUX_MARK_BEGIN,
PINMUX_MARK_END },
.function = { PINMUX_FUNCTION_BEGIN,
PINMUX_FUNCTION_END },
.first_gpio = GPIO_PORT0,
.last_gpio = GPIO_FN_TRACEAUD_FROM_MEMC,
.gpios = pinmux_gpios,
.cfg_regs = pinmux_config_regs,
.data_regs = pinmux_data_regs,
.gpio_data = pinmux_data,
.gpio_data_size = ARRAY_SIZE(pinmux_data),
};
void r8a7740_pinmux_init(void)
{
register_pinmux(&r8a7740_pinmux_info);
}
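/*
 * Illustrative sketch (not part of this file): with the legacy GPIO-based
 * pinmux interface used by mach-shmobile at this time, a board file selects
 * one of the functions listed in pinmux_gpios[] by requesting the matching
 * GPIO_FN_* number after r8a7740_pinmux_init() has run.  The pin group below
 * (SDHI0) is only an example; the actual set of pins is board-specific and
 * the sketch assumes <linux/gpio.h>.
 */
#if 0	/* example only */
static void __init example_board_pinmux_setup(void)
{
	/* route the SDHI0 interface out on its pins */
	gpio_request(GPIO_FN_SDHI0_CLK, NULL);
	gpio_request(GPIO_FN_SDHI0_CMD, NULL);
	gpio_request(GPIO_FN_SDHI0_D0, NULL);
	gpio_request(GPIO_FN_SDHI0_D1, NULL);
	gpio_request(GPIO_FN_SDHI0_D2, NULL);
	gpio_request(GPIO_FN_SDHI0_D3, NULL);
	gpio_request(GPIO_FN_SDHI0_CD, NULL);
}
#endif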
| gpl-2.0 |
mifl/android_kernel_pantech_ef45k | drivers/scsi/qla4xxx/ql4_mbx.c | 4820 | 56259 | /*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2010 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
/**
* qla4xxx_mailbox_command - issues mailbox commands
* @ha: Pointer to host adapter structure.
* @inCount: number of mailbox registers to load.
* @outCount: number of mailbox registers to return.
* @mbx_cmd: data pointer for mailbox in registers.
* @mbx_sts: data pointer for mailbox out registers.
*
* This routine issue mailbox commands and waits for completion.
* If outCount is 0, this routine completes successfully WITHOUT waiting
* for the mailbox command to complete.
**/
int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
uint8_t outCount, uint32_t *mbx_cmd,
uint32_t *mbx_sts)
{
int status = QLA_ERROR;
uint8_t i;
u_long wait_count;
uint32_t intr_status;
unsigned long flags = 0;
uint32_t dev_state;
/* Make sure that pointers are valid */
if (!mbx_cmd || !mbx_sts) {
DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts "
"pointer\n", ha->host_no, __func__));
return status;
}
if (is_qla40XX(ha)) {
if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
"prematurely completing mbx cmd as "
"adapter removal detected\n",
ha->host_no, __func__));
return status;
}
}
if (is_qla8022(ha)) {
if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
"prematurely completing mbx cmd as firmware "
"recovery detected\n", ha->host_no, __func__));
return status;
}
/* Do not send any mbx cmd if h/w is in failed state*/
qla4_8xxx_idc_lock(ha);
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
qla4_8xxx_idc_unlock(ha);
if (dev_state == QLA82XX_DEV_FAILED) {
ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in "
"failed state, do not send any mailbox commands\n",
ha->host_no, __func__);
return status;
}
}
if ((is_aer_supported(ha)) &&
(test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
"timeout MBX Exiting.\n", ha->host_no, __func__));
return status;
}
/* Mailbox code active */
wait_count = MBOX_TOV * 100;
while (wait_count--) {
mutex_lock(&ha->mbox_sem);
if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) {
set_bit(AF_MBOX_COMMAND, &ha->flags);
mutex_unlock(&ha->mbox_sem);
break;
}
mutex_unlock(&ha->mbox_sem);
if (!wait_count) {
DEBUG2(printk("scsi%ld: %s: mbox_sem failed\n",
ha->host_no, __func__));
return status;
}
msleep(10);
}
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->mbox_status_count = outCount;
for (i = 0; i < outCount; i++)
ha->mbox_status[i] = 0;
if (is_qla8022(ha)) {
/* Load all mailbox registers, except mailbox 0. */
DEBUG5(
printk("scsi%ld: %s: Cmd ", ha->host_no, __func__);
for (i = 0; i < inCount; i++)
printk("mb%d=%04x ", i, mbx_cmd[i]);
printk("\n"));
for (i = 1; i < inCount; i++)
writel(mbx_cmd[i], &ha->qla4_8xxx_reg->mailbox_in[i]);
writel(mbx_cmd[0], &ha->qla4_8xxx_reg->mailbox_in[0]);
readl(&ha->qla4_8xxx_reg->mailbox_in[0]);
writel(HINT_MBX_INT_PENDING, &ha->qla4_8xxx_reg->hint);
} else {
/* Load all mailbox registers, except mailbox 0. */
for (i = 1; i < inCount; i++)
writel(mbx_cmd[i], &ha->reg->mailbox[i]);
/* Wakeup firmware */
writel(mbx_cmd[0], &ha->reg->mailbox[0]);
readl(&ha->reg->mailbox[0]);
writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Wait for completion */
/*
* If we don't want status, don't wait for the mailbox command to
* complete. For example, MBOX_CMD_RESET_FW doesn't return status,
* you must poll the inbound Interrupt Mask for completion.
*/
if (outCount == 0) {
status = QLA_SUCCESS;
goto mbox_exit;
}
/*
* Wait for completion: Poll or completion queue
*/
if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
test_bit(AF_ONLINE, &ha->flags) &&
!test_bit(AF_HA_REMOVAL, &ha->flags)) {
/* Do not poll for completion. Use completion queue */
set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
} else {
/* Poll for command to complete */
wait_count = jiffies + MBOX_TOV * HZ;
while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
if (time_after_eq(jiffies, wait_count))
break;
/*
* Service the interrupt.
* The ISR will save the mailbox status registers
* to a temporary storage location in the adapter
* structure.
*/
spin_lock_irqsave(&ha->hardware_lock, flags);
if (is_qla8022(ha)) {
intr_status =
readl(&ha->qla4_8xxx_reg->host_int);
if (intr_status & ISRX_82XX_RISC_INT) {
ha->mbox_status_count = outCount;
intr_status =
readl(&ha->qla4_8xxx_reg->host_status);
ha->isp_ops->interrupt_service_routine(
ha, intr_status);
if (test_bit(AF_INTERRUPTS_ON,
&ha->flags) &&
test_bit(AF_INTx_ENABLED,
&ha->flags))
qla4_8xxx_wr_32(ha,
ha->nx_legacy_intr.tgt_mask_reg,
0xfbff);
}
} else {
intr_status = readl(&ha->reg->ctrl_status);
if (intr_status & INTR_PENDING) {
/*
* Service the interrupt.
* The ISR will save the mailbox status
* registers to a temporary storage
* location in the adapter structure.
*/
ha->mbox_status_count = outCount;
ha->isp_ops->interrupt_service_routine(
ha, intr_status);
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
msleep(10);
}
}
/* Check for mailbox timeout. */
if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
if (is_qla8022(ha) &&
test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: %s: prematurely completing mbx cmd as "
"firmware recovery detected\n",
ha->host_no, __func__));
goto mbox_exit;
}
DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...,"
" Scheduling Adapter Reset\n", ha->host_no,
mbx_cmd[0]));
ha->mailbox_timeout_count++;
mbx_sts[0] = (-1);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
if (is_qla8022(ha)) {
ql4_printk(KERN_INFO, ha,
"disabling pause transmit on port 0 & 1.\n");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
CRB_NIU_XG_PAUSE_CTL_P0 |
CRB_NIU_XG_PAUSE_CTL_P1);
}
goto mbox_exit;
}
/*
* Copy the mailbox out registers to the caller's mailbox in/out
* structure.
*/
spin_lock_irqsave(&ha->hardware_lock, flags);
for (i = 0; i < outCount; i++)
mbx_sts[i] = ha->mbox_status[i];
/* Set return status and error flags (if applicable). */
switch (ha->mbox_status[0]) {
case MBOX_STS_COMMAND_COMPLETE:
status = QLA_SUCCESS;
break;
case MBOX_STS_INTERMEDIATE_COMPLETION:
status = QLA_SUCCESS;
break;
case MBOX_STS_BUSY:
DEBUG2( printk("scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
ha->host_no, __func__, mbx_cmd[0]));
ha->mailbox_timeout_count++;
break;
default:
DEBUG2(printk("scsi%ld: %s: **** FAILED, cmd = %08X, "
"sts = %08X ****\n", ha->host_no, __func__,
mbx_cmd[0], mbx_sts[0]));
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
mbox_exit:
mutex_lock(&ha->mbox_sem);
clear_bit(AF_MBOX_COMMAND, &ha->flags);
mutex_unlock(&ha->mbox_sem);
clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
return status;
}
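/*
 * Illustrative sketch (not part of the driver): the calling convention used
 * throughout this file.  A caller zeroes both register arrays, loads the
 * opcode and any parameters into mbox_cmd[], issues the command, and reads
 * the results back from mbox_sts[].  This mirrors qla4xxx_get_firmware_state()
 * further below; the helper name is hypothetical.
 */
#if 0	/* example only */
static int example_read_fw_state(struct scsi_qla_host *ha, uint32_t *state)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;

	/* MBOX_REG_COUNT registers in, 4 registers out */
	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0],
				    &mbox_sts[0]) != QLA_SUCCESS)
		return QLA_ERROR;

	*state = mbox_sts[1];	/* firmware state word, as read below */
	return QLA_SUCCESS;
}
#endif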
void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
{
set_bit(AF_FW_RECOVERY, &ha->flags);
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: set FW RECOVERY!\n",
ha->host_no, __func__);
if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags)) {
complete(&ha->mbx_intr_comp);
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
"recovery, doing premature completion of "
"mbx cmd\n", ha->host_no, __func__);
} else {
set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
"recovery, doing premature completion of "
"polling mbx cmd\n", ha->host_no, __func__);
}
}
}
static uint8_t
qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
{
memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
if (is_qla8022(ha))
qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, 0);
mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
mbox_cmd[1] = 0;
mbox_cmd[2] = LSDW(init_fw_cb_dma);
mbox_cmd[3] = MSDW(init_fw_cb_dma);
mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
mbox_cmd[5] = (IFCB_VER_MAX << 8) | IFCB_VER_MIN;
if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
QLA_SUCCESS) {
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
"MBOX_CMD_INITIALIZE_FIRMWARE"
" failed w/ status %04X\n",
ha->host_no, __func__, mbox_sts[0]));
return QLA_ERROR;
}
return QLA_SUCCESS;
}
uint8_t
qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
{
memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
mbox_cmd[2] = LSDW(init_fw_cb_dma);
mbox_cmd[3] = MSDW(init_fw_cb_dma);
mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) !=
QLA_SUCCESS) {
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
"MBOX_CMD_GET_INIT_FW_CTRL_BLOCK"
" failed w/ status %04X\n",
ha->host_no, __func__, mbox_sts[0]));
return QLA_ERROR;
}
return QLA_SUCCESS;
}
static void
qla4xxx_update_local_ip(struct scsi_qla_host *ha,
struct addr_ctrl_blk *init_fw_cb)
{
ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
ha->ip_config.ipv4_addr_state =
le16_to_cpu(init_fw_cb->ipv4_addr_state);
ha->ip_config.eth_mtu_size =
le16_to_cpu(init_fw_cb->eth_mtu_size);
ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port);
if (ha->acb_version == ACB_SUPPORTED) {
ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts);
ha->ip_config.ipv6_addl_options =
le16_to_cpu(init_fw_cb->ipv6_addtl_opts);
}
/* Save IPv4 Address Info */
memcpy(ha->ip_config.ip_address, init_fw_cb->ipv4_addr,
min(sizeof(ha->ip_config.ip_address),
sizeof(init_fw_cb->ipv4_addr)));
memcpy(ha->ip_config.subnet_mask, init_fw_cb->ipv4_subnet,
min(sizeof(ha->ip_config.subnet_mask),
sizeof(init_fw_cb->ipv4_subnet)));
memcpy(ha->ip_config.gateway, init_fw_cb->ipv4_gw_addr,
min(sizeof(ha->ip_config.gateway),
sizeof(init_fw_cb->ipv4_gw_addr)));
ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag);
if (is_ipv6_enabled(ha)) {
/* Save IPv6 Address */
ha->ip_config.ipv6_link_local_state =
le16_to_cpu(init_fw_cb->ipv6_lnk_lcl_addr_state);
ha->ip_config.ipv6_addr0_state =
le16_to_cpu(init_fw_cb->ipv6_addr0_state);
ha->ip_config.ipv6_addr1_state =
le16_to_cpu(init_fw_cb->ipv6_addr1_state);
ha->ip_config.ipv6_default_router_state =
le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state);
ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
memcpy(&ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[8],
init_fw_cb->ipv6_if_id,
min(sizeof(ha->ip_config.ipv6_link_local_addr)/2,
sizeof(init_fw_cb->ipv6_if_id)));
memcpy(&ha->ip_config.ipv6_addr0, init_fw_cb->ipv6_addr0,
min(sizeof(ha->ip_config.ipv6_addr0),
sizeof(init_fw_cb->ipv6_addr0)));
memcpy(&ha->ip_config.ipv6_addr1, init_fw_cb->ipv6_addr1,
min(sizeof(ha->ip_config.ipv6_addr1),
sizeof(init_fw_cb->ipv6_addr1)));
memcpy(&ha->ip_config.ipv6_default_router_addr,
init_fw_cb->ipv6_dflt_rtr_addr,
min(sizeof(ha->ip_config.ipv6_default_router_addr),
sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
ha->ip_config.ipv6_vlan_tag =
be16_to_cpu(init_fw_cb->ipv6_vlan_tag);
ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port);
}
}
uint8_t
qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
uint32_t *mbox_cmd,
uint32_t *mbox_sts,
struct addr_ctrl_blk *init_fw_cb,
dma_addr_t init_fw_cb_dma)
{
if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma)
!= QLA_SUCCESS) {
DEBUG2(printk(KERN_WARNING
"scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
ha->host_no, __func__));
return QLA_ERROR;
}
DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk)));
/* Save some info in adapter structure. */
ha->acb_version = init_fw_cb->acb_version;
ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options);
ha->heartbeat_interval = init_fw_cb->hb_interval;
memcpy(ha->name_string, init_fw_cb->iscsi_name,
min(sizeof(ha->name_string),
sizeof(init_fw_cb->iscsi_name)));
ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
/*memcpy(ha->alias, init_fw_cb->Alias,
min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
qla4xxx_update_local_ip(ha, init_fw_cb);
return QLA_SUCCESS;
}
/**
* qla4xxx_initialize_fw_cb - initializes firmware control block.
* @ha: Pointer to host adapter structure.
**/
int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
{
struct addr_ctrl_blk *init_fw_cb;
dma_addr_t init_fw_cb_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
sizeof(struct addr_ctrl_blk),
&init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
ha->host_no, __func__));
goto exit_init_fw_cb_no_free;
}
memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
/* Get Initialize Firmware Control Block. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
dma_free_coherent(&ha->pdev->dev,
sizeof(struct addr_ctrl_blk),
init_fw_cb, init_fw_cb_dma);
goto exit_init_fw_cb;
}
/* Initialize request and response queues. */
qla4xxx_init_rings(ha);
/* Fill in the request and response queue information. */
init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma));
init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma));
/* Set up required options. */
init_fw_cb->fw_options |=
__constant_cpu_to_le16(FWOPT_SESSION_MODE |
FWOPT_INITIATOR_MODE);
if (is_qla8022(ha))
init_fw_cb->fw_options |=
__constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
init_fw_cb->add_fw_options = 0;
init_fw_cb->add_fw_options |=
__constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
init_fw_cb->add_fw_options |=
__constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
!= QLA_SUCCESS) {
DEBUG2(printk(KERN_WARNING
"scsi%ld: %s: Failed to set init_fw_ctrl_blk\n",
ha->host_no, __func__));
goto exit_init_fw_cb;
}
if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0],
init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n",
ha->host_no, __func__));
goto exit_init_fw_cb;
}
status = QLA_SUCCESS;
exit_init_fw_cb:
dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
init_fw_cb, init_fw_cb_dma);
exit_init_fw_cb_no_free:
return status;
}
/**
* qla4xxx_get_dhcp_ip_address - gets HBA ip address via DHCP
* @ha: Pointer to host adapter structure.
**/
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
{
struct addr_ctrl_blk *init_fw_cb;
dma_addr_t init_fw_cb_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
sizeof(struct addr_ctrl_blk),
&init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
__func__);
return QLA_ERROR;
}
/* Get Initialize Firmware Control Block. */
memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
ha->host_no, __func__));
dma_free_coherent(&ha->pdev->dev,
sizeof(struct addr_ctrl_blk),
init_fw_cb, init_fw_cb_dma);
return QLA_ERROR;
}
/* Save IP Address. */
qla4xxx_update_local_ip(ha, init_fw_cb);
dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
init_fw_cb, init_fw_cb_dma);
return QLA_SUCCESS;
}
/**
* qla4xxx_get_firmware_state - gets firmware state of HBA
* @ha: Pointer to host adapter structure.
**/
int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
/* Get firmware version */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
"status %04X\n", ha->host_no, __func__,
mbox_sts[0]));
return QLA_ERROR;
}
ha->firmware_state = mbox_sts[1];
ha->board_id = mbox_sts[2];
ha->addl_fw_state = mbox_sts[3];
DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n",
ha->host_no, __func__, ha->firmware_state);)
return QLA_SUCCESS;
}
/**
* qla4xxx_get_firmware_status - retrieves firmware status
* @ha: Pointer to host adapter structure.
**/
int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
/* Get firmware version */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
"status %04X\n", ha->host_no, __func__,
mbox_sts[0]));
return QLA_ERROR;
}
ql4_printk(KERN_INFO, ha, "%ld firmware IOCBs available (%d).\n",
ha->host_no, mbox_sts[2]);
return QLA_SUCCESS;
}
/**
* qla4xxx_get_fwddb_entry - retrieves firmware ddb entry
* @ha: Pointer to host adapter structure.
* @fw_ddb_index: Firmware's device database index
* @fw_ddb_entry: Pointer to firmware's device database entry structure
* @fw_ddb_entry_dma: DMA address of the device database entry buffer
* @num_valid_ddb_entries: Pointer to number of valid ddb entries
* @next_ddb_index: Pointer to next valid device database index
* @fw_ddb_device_state: Pointer to device state
* @conn_err_detail: Pointer to connection error details
* @tcp_source_port_num: Pointer to TCP source port number
* @connection_id: Pointer to firmware connection ID
**/
int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
uint16_t fw_ddb_index,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma,
uint32_t *num_valid_ddb_entries,
uint32_t *next_ddb_index,
uint32_t *fw_ddb_device_state,
uint32_t *conn_err_detail,
uint16_t *tcp_source_port_num,
uint16_t *connection_id)
{
int status = QLA_ERROR;
uint16_t options;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
/* Make sure the device index is valid */
if (fw_ddb_index >= MAX_DDB_ENTRIES) {
DEBUG2(printk("scsi%ld: %s: ddb [%d] out of range.\n",
ha->host_no, __func__, fw_ddb_index));
goto exit_get_fwddb;
}
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
if (fw_ddb_entry)
memset(fw_ddb_entry, 0, sizeof(struct dev_db_entry));
mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
mbox_cmd[1] = (uint32_t) fw_ddb_index;
mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
mbox_cmd[4] = sizeof(struct dev_db_entry);
if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 7, &mbox_cmd[0], &mbox_sts[0]) ==
QLA_ERROR) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
" with status 0x%04X\n", ha->host_no, __func__,
mbox_sts[0]));
goto exit_get_fwddb;
}
if (fw_ddb_index != mbox_sts[1]) {
DEBUG2(printk("scsi%ld: %s: ddb mismatch [%d] != [%d].\n",
ha->host_no, __func__, fw_ddb_index,
mbox_sts[1]));
goto exit_get_fwddb;
}
if (fw_ddb_entry) {
options = le16_to_cpu(fw_ddb_entry->options);
if (options & DDB_OPT_IPV6_DEVICE) {
ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
"Next %d State %04x ConnErr %08x %pI6 "
":%04d \"%s\"\n", __func__, fw_ddb_index,
mbox_sts[0], mbox_sts[2], mbox_sts[3],
mbox_sts[4], mbox_sts[5],
fw_ddb_entry->ip_addr,
le16_to_cpu(fw_ddb_entry->port),
fw_ddb_entry->iscsi_name);
} else {
ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
"Next %d State %04x ConnErr %08x %pI4 "
":%04d \"%s\"\n", __func__, fw_ddb_index,
mbox_sts[0], mbox_sts[2], mbox_sts[3],
mbox_sts[4], mbox_sts[5],
fw_ddb_entry->ip_addr,
le16_to_cpu(fw_ddb_entry->port),
fw_ddb_entry->iscsi_name);
}
}
if (num_valid_ddb_entries)
*num_valid_ddb_entries = mbox_sts[2];
if (next_ddb_index)
*next_ddb_index = mbox_sts[3];
if (fw_ddb_device_state)
*fw_ddb_device_state = mbox_sts[4];
/*
* RA: This mailbox has been changed to pass connection error and
* details. This is true for ISP4010 as of firmware version E; it is
* not clear exactly when the change was made. The time2wait value is
* taken from the default_time2wait field (aka minTime2Wait) of the
* DEV_DB_ENTRY struct.
*/
if (conn_err_detail)
*conn_err_detail = mbox_sts[5];
if (tcp_source_port_num)
*tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
if (connection_id)
*connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
status = QLA_SUCCESS;
exit_get_fwddb:
return status;
}
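/*
 * Illustrative sketch (not part of the driver): the out-parameters of
 * qla4xxx_get_fwddb_entry() are optional.  The routine only writes through
 * non-NULL pointers, so a caller interested in just the device state can
 * pass NULL (and a zero DMA address) for everything else.  The helper name
 * is hypothetical.
 */
#if 0	/* example only */
static int example_query_ddb_state(struct scsi_qla_host *ha,
				   uint16_t fw_ddb_index, uint32_t *state)
{
	return qla4xxx_get_fwddb_entry(ha, fw_ddb_index, NULL, 0,
				       NULL, NULL, state, NULL, NULL, NULL);
}
#endif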
int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status;
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_CONN_OPEN;
mbox_cmd[1] = fw_ddb_index;
status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
&mbox_sts[0]);
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: status = %d mbx0 = 0x%x mbx1 = 0x%x\n",
__func__, status, mbox_sts[0], mbox_sts[1]));
return status;
}
/**
* qla4xxx_set_ddb_entry - sets a ddb entry.
* @ha: Pointer to host adapter structure.
* @fw_ddb_index: Firmware's device database index
* @fw_ddb_entry_dma: dma address of ddb entry
* @mbx_sts: mailbox 0 to be returned or NULL
*
* This routine initializes or updates the adapter's device database
* entry for the specified device.
**/
int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status;
/* Do not wait for completion. The firmware will send us an
* ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
*/
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_SET_DATABASE_ENTRY;
mbox_cmd[1] = (uint32_t) fw_ddb_index;
mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
mbox_cmd[4] = sizeof(struct dev_db_entry);
status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
&mbox_sts[0]);
if (mbx_sts)
*mbx_sts = mbox_sts[0];
DEBUG2(printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n",
ha->host_no, __func__, status, mbox_sts[0], mbox_sts[4]);)
return status;
}
int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry, int options)
{
int status;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
mbox_cmd[1] = ddb_entry->fw_ddb_index;
mbox_cmd[3] = options;
status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
&mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
"failed sts %04X %04X", __func__,
mbox_sts[0], mbox_sts[1]));
}
return status;
}
/**
* qla4xxx_get_crash_record - retrieves crash record.
* @ha: Pointer to host adapter structure.
*
* This routine retrieves a crash record from the QLA4010 after an 8002h aen.
**/
void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
struct crash_record *crash_record = NULL;
dma_addr_t crash_record_dma = 0;
uint32_t crash_record_size = 0;
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
/* Get size of crash record. */
mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
ha->host_no, __func__));
goto exit_get_crash_record;
}
crash_record_size = mbox_sts[4];
if (crash_record_size == 0) {
DEBUG2(printk("scsi%ld: %s: ERROR: Crash record size is 0!\n",
ha->host_no, __func__));
goto exit_get_crash_record;
}
/* Alloc Memory for Crash Record. */
crash_record = dma_alloc_coherent(&ha->pdev->dev, crash_record_size,
&crash_record_dma, GFP_KERNEL);
if (crash_record == NULL)
goto exit_get_crash_record;
/* Get Crash Record. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
mbox_cmd[2] = LSDW(crash_record_dma);
mbox_cmd[3] = MSDW(crash_record_dma);
mbox_cmd[4] = crash_record_size;
if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS)
goto exit_get_crash_record;
/* Dump Crash Record. */
exit_get_crash_record:
if (crash_record)
dma_free_coherent(&ha->pdev->dev, crash_record_size,
crash_record, crash_record_dma);
}
/**
* qla4xxx_get_conn_event_log - retrieves connection event log
* @ha: Pointer to host adapter structure.
**/
void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
struct conn_event_log_entry *event_log = NULL;
dma_addr_t event_log_dma = 0;
uint32_t event_log_size = 0;
uint32_t num_valid_entries;
uint32_t oldest_entry = 0;
uint32_t max_event_log_entries;
uint8_t i;
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
/* Get size of the connection event log. */
mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS)
goto exit_get_event_log;
event_log_size = mbox_sts[4];
if (event_log_size == 0)
goto exit_get_event_log;
/* Alloc Memory for Event Log. */
event_log = dma_alloc_coherent(&ha->pdev->dev, event_log_size,
&event_log_dma, GFP_KERNEL);
if (event_log == NULL)
goto exit_get_event_log;
/* Get Event Log. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
mbox_cmd[2] = LSDW(event_log_dma);
mbox_cmd[3] = MSDW(event_log_dma);
if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
"log!\n", ha->host_no, __func__));
goto exit_get_event_log;
}
/* Dump Event Log. */
num_valid_entries = mbox_sts[1];
max_event_log_entries = event_log_size /
sizeof(struct conn_event_log_entry);
if (num_valid_entries > max_event_log_entries)
oldest_entry = num_valid_entries % max_event_log_entries;
DEBUG3(printk("scsi%ld: Connection Event Log Dump (%d entries):\n",
ha->host_no, num_valid_entries));
if (ql4xextended_error_logging == 3) {
if (oldest_entry == 0) {
/* Circular Buffer has not wrapped around */
for (i=0; i < num_valid_entries; i++) {
qla4xxx_dump_buffer((uint8_t *)event_log+
(i*sizeof(*event_log)),
sizeof(*event_log));
}
} else {
/* Circular Buffer has wrapped around -
* display accordingly*/
for (i=oldest_entry; i < max_event_log_entries; i++) {
qla4xxx_dump_buffer((uint8_t *)event_log+
(i*sizeof(*event_log)),
sizeof(*event_log));
}
for (i=0; i < oldest_entry; i++) {
qla4xxx_dump_buffer((uint8_t *)event_log+
(i*sizeof(*event_log)),
sizeof(*event_log));
}
}
}
exit_get_event_log:
if (event_log)
dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
event_log_dma);
}
/**
* qla4xxx_abort_task - issues Abort Task
* @ha: Pointer to host adapter structure.
* @srb: Pointer to srb entry
*
* This routine issues an Abort Task to the firmware for the command
* associated with @srb. The caller must ensure that the srb pointer
* is valid before calling this routine.
**/
int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
struct scsi_cmnd *cmd = srb->cmd;
int status = QLA_SUCCESS;
unsigned long flags = 0;
uint32_t index;
/*
* Send abort task command to ISP, so that the ISP will return
* request with ABORT status
*/
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
spin_lock_irqsave(&ha->hardware_lock, flags);
index = (unsigned long)(unsigned char *)cmd->host_scribble;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Firmware already posted completion on response queue */
if (index == MAX_SRBS)
return status;
mbox_cmd[0] = MBOX_CMD_ABORT_TASK;
mbox_cmd[1] = srb->ddb->fw_ddb_index;
mbox_cmd[2] = index;
/* Immediate Command Enable */
mbox_cmd[5] = 0x01;
qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
&mbox_sts[0]);
if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) {
status = QLA_ERROR;
DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%d: abort task FAILED: "
"mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n",
ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0],
mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]));
}
return status;
}
/**
* qla4xxx_reset_lun - issues LUN Reset
* @ha: Pointer to host adapter structure.
* @ddb_entry: Pointer to device database entry
* @lun: lun number
*
* This routine performs a LUN RESET on the specified target/lun.
* The caller must ensure that the ddb_entry pointer
* is valid before calling this routine.
**/
int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
int lun)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_SUCCESS;
DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no,
ddb_entry->fw_ddb_index, lun));
/*
* Send lun reset command to ISP, so that the ISP will return all
* outstanding requests with RESET status
*/
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_LUN_RESET;
mbox_cmd[1] = ddb_entry->fw_ddb_index;
mbox_cmd[2] = lun << 8;
mbox_cmd[5] = 0x01; /* Immediate Command Enable */
qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
status = QLA_ERROR;
return status;
}
/**
* qla4xxx_reset_target - issues target Reset
* @ha: Pointer to host adapter structure.
* @ddb_entry: Pointer to device database entry
*
* This routine performs a TARGET RESET on the specified target.
* The caller must ensure that the ddb_entry pointer
* is valid before calling this routine.
**/
int qla4xxx_reset_target(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_SUCCESS;
DEBUG2(printk("scsi%ld:%d: target reset issued\n", ha->host_no,
ddb_entry->fw_ddb_index));
/*
* Send target reset command to ISP, so that the ISP will return all
* outstanding requests with RESET status
*/
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_TARGET_WARM_RESET;
mbox_cmd[1] = ddb_entry->fw_ddb_index;
mbox_cmd[5] = 0x01; /* Immediate Command Enable */
qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
&mbox_sts[0]);
if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
status = QLA_ERROR;
return status;
}
int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
uint32_t offset, uint32_t len)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_READ_FLASH;
mbox_cmd[1] = LSDW(dma_addr);
mbox_cmd[2] = MSDW(dma_addr);
mbox_cmd[3] = offset;
mbox_cmd[4] = len;
if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
"status %04X %04X, offset %08x, len %08x\n", ha->host_no,
__func__, mbox_sts[0], mbox_sts[1], offset, len));
return QLA_ERROR;
}
return QLA_SUCCESS;
}
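/*
 * Illustrative sketch (not part of the driver): reading a flash region with
 * qla4xxx_get_flash().  The caller owns the DMA-coherent buffer, exactly as
 * qla4xxx_bootdb_by_index() below expects of its caller.  The helper name and
 * the assumption that the whole region fits in one buffer are hypothetical.
 */
#if 0	/* example only */
static int example_read_flash_region(struct scsi_qla_host *ha,
				     uint32_t offset, uint32_t len)
{
	void *buf;
	dma_addr_t buf_dma;
	int status;

	buf = dma_alloc_coherent(&ha->pdev->dev, len, &buf_dma, GFP_KERNEL);
	if (buf == NULL)
		return QLA_ERROR;

	status = qla4xxx_get_flash(ha, buf_dma, offset, len);
	/* on QLA_SUCCESS, buf now holds 'len' bytes read from 'offset' */

	dma_free_coherent(&ha->pdev->dev, len, buf, buf_dma);
	return status;
}
#endif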
/**
* qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version
* @ha: Pointer to host adapter structure.
*
* Retrieves the FW version, iSCSI draft version & bootloader version of HBA.
* Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
* those mailboxes, if unused.
**/
int qla4xxx_about_firmware(struct scsi_qla_host *ha)
{
struct about_fw_info *about_fw = NULL;
dma_addr_t about_fw_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
about_fw = dma_alloc_coherent(&ha->pdev->dev,
sizeof(struct about_fw_info),
&about_fw_dma, GFP_KERNEL);
if (!about_fw) {
DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
"for about_fw\n", __func__));
return status;
}
memset(about_fw, 0, sizeof(struct about_fw_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
mbox_cmd[2] = LSDW(about_fw_dma);
mbox_cmd[3] = MSDW(about_fw_dma);
mbox_cmd[4] = sizeof(struct about_fw_info);
status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
&mbox_cmd[0], &mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
"failed w/ status %04X\n", __func__,
mbox_sts[0]));
goto exit_about_fw;
}
/* Save version information. */
ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major);
ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor);
ha->patch_number = le16_to_cpu(about_fw->fw_patch);
ha->build_number = le16_to_cpu(about_fw->fw_build);
ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major);
ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
ha->bootload_major = le16_to_cpu(about_fw->bootload_major);
ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor);
ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch);
ha->bootload_build = le16_to_cpu(about_fw->bootload_build);
status = QLA_SUCCESS;
exit_about_fw:
dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
about_fw, about_fw_dma);
return status;
}
static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
dma_addr_t dma_addr)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS;
mbox_cmd[1] = options;
mbox_cmd[2] = LSDW(dma_addr);
mbox_cmd[3] = MSDW(dma_addr);
if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
ha->host_no, __func__, mbox_sts[0]));
return QLA_ERROR;
}
return QLA_SUCCESS;
}
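/* Ask the firmware to reserve DDB index @ddb_index for driver use
* (MBOX_CMD_REQUEST_DATABASE_ENTRY); the raw mailbox 0 status is returned
* through @mbx_sts so callers can distinguish failure reasons. */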
int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index,
uint32_t *mbx_sts)
{
int status;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
mbox_cmd[1] = ddb_index;
status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
&mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
__func__, mbox_sts[0]));
}
*mbx_sts = mbox_sts[0];
return status;
}
int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index)
{
int status;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
mbox_cmd[1] = ddb_index;
status = qla4xxx_mailbox_command(ha, 2, 1, &mbox_cmd[0],
&mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
__func__, mbox_sts[0]));
}
return status;
}
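/* Write @length bytes from the buffer at @dma_addr to flash at @offset.
* @options carries the WRITE_FLASH option bits (e.g. FLASH_OPT_RMW_COMMIT
* as used by qla4xxx_set_chap() below). */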
int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
uint32_t offset, uint32_t length, uint32_t options)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_SUCCESS;
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_WRITE_FLASH;
mbox_cmd[1] = LSDW(dma_addr);
mbox_cmd[2] = MSDW(dma_addr);
mbox_cmd[3] = offset;
mbox_cmd[4] = length;
mbox_cmd[5] = options;
status = qla4xxx_mailbox_command(ha, 6, 2, &mbox_cmd[0], &mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_WRITE_FLASH "
"failed w/ status %04X, mbx1 %04X\n",
__func__, mbox_sts[0], mbox_sts[1]));
}
return status;
}
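/* Read the boot-time device database entry at @ddb_index straight from the
* flash DDB region and accept it only if its cookie is DDB_VALID_COOKIE. */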
int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
{
uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
uint32_t dev_db_end_offset;
int status = QLA_ERROR;
memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
dev_db_end_offset = FLASH_OFFSET_DB_END;
if (dev_db_start_offset > dev_db_end_offset) {
DEBUG2(ql4_printk(KERN_ERR, ha,
"%s: Invalid DDB index %d\n", __func__,
ddb_index));
goto exit_bootdb_failed;
}
if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash "
"failed\n", ha->host_no, __func__);
goto exit_bootdb_failed;
}
if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
status = QLA_SUCCESS;
exit_bootdb_failed:
return status;
}
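/* Read CHAP entry @idx from the flash CHAP table into @username/@password.
* On 40xx adapters the table sits at FLASH_CHAP_OFFSET; otherwise it is
* located through the flash layout table (flt_region_chap), with the
* second port using the upper half of flt_chap_size. */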
int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
uint16_t idx)
{
int ret = 0;
int rval = QLA_ERROR;
uint32_t offset = 0, chap_size;
struct ql4_chap_table *chap_table;
dma_addr_t chap_dma;
chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
if (chap_table == NULL) {
ret = -ENOMEM;
goto exit_get_chap;
}
chap_size = sizeof(struct ql4_chap_table);
memset(chap_table, 0, chap_size);
if (is_qla40XX(ha))
offset = FLASH_CHAP_OFFSET | (idx * chap_size);
else {
offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
/* flt_chap_size is the CHAP table size for both ports,
* so divide it by 2 to get the offset of the second port's half
*/
if (ha->port_num == 1)
offset += (ha->hw.flt_chap_size / 2);
offset += (idx * chap_size);
}
rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
if (rval != QLA_SUCCESS) {
ret = -EINVAL;
goto exit_get_chap;
}
DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
__le16_to_cpu(chap_table->cookie)));
if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
goto exit_get_chap;
}
strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
exit_get_chap:
dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
return ret;
}
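/* Build a CHAP table entry (peer/BIDI when @bidi is set, local otherwise),
* commit it to flash at slot @idx and, on success, refresh the cached copy
* in ha->chap_list. */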
static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username,
char *password, uint16_t idx, int bidi)
{
int ret = 0;
int rval = QLA_ERROR;
uint32_t offset = 0;
struct ql4_chap_table *chap_table;
dma_addr_t chap_dma;
chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
if (chap_table == NULL) {
ret = -ENOMEM;
goto exit_set_chap;
}
memset(chap_table, 0, sizeof(struct ql4_chap_table));
if (bidi)
chap_table->flags |= BIT_6; /* peer */
else
chap_table->flags |= BIT_7; /* local */
chap_table->secret_len = strlen(password);
strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN);
strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN);
chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
offset = FLASH_CHAP_OFFSET | (idx * sizeof(struct ql4_chap_table));
rval = qla4xxx_set_flash(ha, chap_dma, offset,
sizeof(struct ql4_chap_table),
FLASH_OPT_RMW_COMMIT);
if (rval == QLA_SUCCESS && ha->chap_list) {
/* Update ha chap_list cache */
memcpy((struct ql4_chap_table *)ha->chap_list + idx,
chap_table, sizeof(struct ql4_chap_table));
}
dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
if (rval != QLA_SUCCESS)
ret = -EINVAL;
exit_set_chap:
return ret;
}
/**
* qla4xxx_get_chap_index - Get chap index given username and secret
* @ha: pointer to adapter structure
* @username: CHAP username to be searched
* @password: CHAP password to be searched
* @bidi: Is this a BIDI CHAP
* @chap_index: CHAP index to be returned
*
* Match the username and password against the chap_list and return the index
* if a match is found. If no match is found, add the entry to FLASH and
* return the index at which the entry was written.
**/
int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
char *password, int bidi, uint16_t *chap_index)
{
int i, rval;
int free_index = -1;
int found_index = 0;
int max_chap_entries = 0;
struct ql4_chap_table *chap_table;
if (is_qla8022(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
else
max_chap_entries = MAX_CHAP_ENTRIES_40XX;
if (!ha->chap_list) {
ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
return QLA_ERROR;
}
if (!username || !password) {
ql4_printk(KERN_ERR, ha, "Do not have username and psw\n");
return QLA_ERROR;
}
mutex_lock(&ha->chap_sem);
for (i = 0; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if (chap_table->cookie !=
__constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
if (i > MAX_RESRV_CHAP_IDX && free_index == -1)
free_index = i;
continue;
}
if (bidi) {
if (chap_table->flags & BIT_7)
continue;
} else {
if (chap_table->flags & BIT_6)
continue;
}
if (!strncmp(chap_table->secret, password,
MAX_CHAP_SECRET_LEN) &&
!strncmp(chap_table->name, username,
MAX_CHAP_NAME_LEN)) {
*chap_index = i;
found_index = 1;
break;
}
}
/* If the CHAP entry is not present and a free index is available,
* write the entry to flash
*/
if (!found_index && free_index != -1) {
rval = qla4xxx_set_chap(ha, username, password,
free_index, bidi);
if (!rval) {
*chap_index = free_index;
found_index = 1;
}
}
mutex_unlock(&ha->chap_sem);
if (found_index)
return QLA_SUCCESS;
return QLA_ERROR;
}
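/* Issue MBOX_CMD_CONN_CLOSE_SESS_LOGOUT to close the given connection and
* log the session out; @option carries the firmware logout option flags. */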
int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
uint16_t fw_ddb_index,
uint16_t connection_id,
uint16_t option)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_SUCCESS;
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
mbox_cmd[1] = fw_ddb_index;
mbox_cmd[2] = connection_id;
mbox_cmd[3] = option;
status = qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_CONN_CLOSE "
"option %04x failed w/ status %04X %04X\n",
__func__, option, mbox_sts[0], mbox_sts[1]));
}
return status;
}
int qla4xxx_disable_acb(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_SUCCESS;
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_DISABLE_ACB;
status = qla4xxx_mailbox_command(ha, 8, 5, &mbox_cmd[0], &mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB "
"failed w/ status %04X %04X %04X", __func__,
mbox_sts[0], mbox_sts[1], mbox_sts[2]));
}
return status;
}
int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
uint32_t acb_type, uint32_t len)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_SUCCESS;
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_GET_ACB;
mbox_cmd[1] = acb_type;
mbox_cmd[2] = LSDW(acb_dma);
mbox_cmd[3] = MSDW(acb_dma);
mbox_cmd[4] = len;
status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_GET_ACB "
"failed w/ status %04X\n", __func__,
mbox_sts[0]));
}
return status;
}
int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
uint32_t *mbox_sts, dma_addr_t acb_dma)
{
int status = QLA_SUCCESS;
memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
mbox_cmd[0] = MBOX_CMD_SET_ACB;
mbox_cmd[1] = 0; /* Primary ACB */
mbox_cmd[2] = LSDW(acb_dma);
mbox_cmd[3] = MSDW(acb_dma);
mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_SET_ACB "
"failed w/ status %04X\n", __func__,
mbox_sts[0]));
}
return status;
}
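/* Populate a firmware dev_db_entry from the iSCSI session/connection
* parameters (target name, destination address, CHAP indices, negotiated
* login options) and push it to the firmware with qla4xxx_set_ddb_entry(). */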
int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry,
struct iscsi_cls_conn *cls_conn,
uint32_t *mbx_sts)
{
struct dev_db_entry *fw_ddb_entry;
struct iscsi_conn *conn;
struct iscsi_session *sess;
struct qla_conn *qla_conn;
struct sockaddr *dst_addr;
dma_addr_t fw_ddb_entry_dma;
int status = QLA_SUCCESS;
int rval = 0;
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
char *ip;
uint16_t iscsi_opts = 0;
uint32_t options = 0;
uint16_t idx, *ptid;
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
DEBUG2(ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer.\n",
__func__));
rval = -ENOMEM;
goto exit_set_param_no_free;
}
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
sess = conn->session;
dst_addr = &qla_conn->qla_ep->dst_addr;
if (dst_addr->sa_family == AF_INET6)
options |= IPV6_DEFAULT_DDB_ENTRY;
status = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
if (status == QLA_ERROR) {
rval = -EINVAL;
goto exit_set_param;
}
ptid = (uint16_t *)&fw_ddb_entry->isid[1];
*ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);
DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%02x%02x%02x%02x%02x%02x]\n",
fw_ddb_entry->isid[5], fw_ddb_entry->isid[4],
fw_ddb_entry->isid[3], fw_ddb_entry->isid[2],
fw_ddb_entry->isid[1], fw_ddb_entry->isid[0]));
iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
memset(fw_ddb_entry->iscsi_name, 0, sizeof(fw_ddb_entry->iscsi_name));
if (sess->targetname != NULL) {
memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
min(strlen(sess->targetname),
sizeof(fw_ddb_entry->iscsi_name)));
}
memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr));
memset(fw_ddb_entry->tgt_addr, 0, sizeof(fw_ddb_entry->tgt_addr));
fw_ddb_entry->options = DDB_OPT_TARGET | DDB_OPT_AUTO_SENDTGTS_DISABLE;
if (dst_addr->sa_family == AF_INET) {
addr = (struct sockaddr_in *)dst_addr;
ip = (char *)&addr->sin_addr;
memcpy(fw_ddb_entry->ip_addr, ip, IP_ADDR_LEN);
fw_ddb_entry->port = cpu_to_le16(ntohs(addr->sin_port));
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: Destination Address [%pI4]: index [%d]\n",
__func__, fw_ddb_entry->ip_addr,
ddb_entry->fw_ddb_index));
} else if (dst_addr->sa_family == AF_INET6) {
addr6 = (struct sockaddr_in6 *)dst_addr;
ip = (char *)&addr6->sin6_addr;
memcpy(fw_ddb_entry->ip_addr, ip, IPv6_ADDR_LEN);
fw_ddb_entry->port = cpu_to_le16(ntohs(addr6->sin6_port));
fw_ddb_entry->options |= DDB_OPT_IPV6_DEVICE;
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: Destination Address [%pI6]: index [%d]\n",
__func__, fw_ddb_entry->ip_addr,
ddb_entry->fw_ddb_index));
} else {
ql4_printk(KERN_ERR, ha,
"%s: Failed to get IP Address\n",
__func__);
rval = -EINVAL;
goto exit_set_param;
}
/* CHAP */
if (sess->username != NULL && sess->password != NULL) {
if (strlen(sess->username) && strlen(sess->password)) {
iscsi_opts |= BIT_7;
rval = qla4xxx_get_chap_index(ha, sess->username,
sess->password,
LOCAL_CHAP, &idx);
if (rval)
goto exit_set_param;
fw_ddb_entry->chap_tbl_idx = cpu_to_le16(idx);
}
}
if (sess->username_in != NULL && sess->password_in != NULL) {
/* Check if BIDI CHAP */
if (strlen(sess->username_in) && strlen(sess->password_in)) {
iscsi_opts |= BIT_4;
rval = qla4xxx_get_chap_index(ha, sess->username_in,
sess->password_in,
BIDI_CHAP, &idx);
if (rval)
goto exit_set_param;
}
}
if (sess->initial_r2t_en)
iscsi_opts |= BIT_10;
if (sess->imm_data_en)
iscsi_opts |= BIT_11;
fw_ddb_entry->iscsi_options = cpu_to_le16(iscsi_opts);
if (conn->max_recv_dlength)
fw_ddb_entry->iscsi_max_rcv_data_seg_len =
__constant_cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));
if (sess->max_r2t)
fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
if (sess->first_burst)
fw_ddb_entry->iscsi_first_burst_len =
__constant_cpu_to_le16((sess->first_burst / BYTE_UNITS));
if (sess->max_burst)
fw_ddb_entry->iscsi_max_burst_len =
__constant_cpu_to_le16((sess->max_burst / BYTE_UNITS));
if (sess->time2wait)
fw_ddb_entry->iscsi_def_time2wait =
cpu_to_le16(sess->time2wait);
if (sess->time2retain)
fw_ddb_entry->iscsi_def_time2retain =
cpu_to_le16(sess->time2retain);
status = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
fw_ddb_entry_dma, mbx_sts);
if (status != QLA_SUCCESS)
rval = -EINVAL;
exit_set_param:
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
fw_ddb_entry, fw_ddb_entry_dma);
exit_set_param_no_free:
return rval;
}
int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
uint16_t stats_size, dma_addr_t stats_dma)
{
int status = QLA_SUCCESS;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
mbox_cmd[0] = MBOX_CMD_GET_MANAGEMENT_DATA;
mbox_cmd[1] = fw_ddb_index;
mbox_cmd[2] = LSDW(stats_dma);
mbox_cmd[3] = MSDW(stats_dma);
mbox_cmd[4] = stats_size;
status = qla4xxx_mailbox_command(ha, 5, 1, &mbox_cmd[0], &mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_WARNING, ha,
"%s: MBOX_CMD_GET_MANAGEMENT_DATA "
"failed w/ status %04X\n", __func__,
mbox_sts[0]));
}
return status;
}
int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
uint32_t ip_idx, uint32_t *sts)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_SUCCESS;
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_GET_IP_ADDR_STATE;
mbox_cmd[1] = acb_idx;
mbox_cmd[2] = ip_idx;
status = qla4xxx_mailbox_command(ha, 3, 8, &mbox_cmd[0], &mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: "
"MBOX_CMD_GET_IP_ADDR_STATE failed w/ "
"status %04X\n", __func__, mbox_sts[0]));
}
memcpy(sts, mbox_sts, sizeof(mbox_sts));
return status;
}
int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
uint32_t offset, uint32_t size)
{
int status = QLA_SUCCESS;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_GET_NVRAM;
mbox_cmd[1] = LSDW(nvram_dma);
mbox_cmd[2] = MSDW(nvram_dma);
mbox_cmd[3] = offset;
mbox_cmd[4] = size;
status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
&mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
"status %04X\n", ha->host_no, __func__,
mbox_sts[0]));
}
return status;
}
int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
uint32_t offset, uint32_t size)
{
int status = QLA_SUCCESS;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_SET_NVRAM;
mbox_cmd[1] = LSDW(nvram_dma);
mbox_cmd[2] = MSDW(nvram_dma);
mbox_cmd[3] = offset;
mbox_cmd[4] = size;
status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
&mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
"status %04X\n", ha->host_no, __func__,
mbox_sts[0]));
}
return status;
}
int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
uint32_t region, uint32_t field0,
uint32_t field1)
{
int status = QLA_SUCCESS;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
mbox_cmd[0] = MBOX_CMD_RESTORE_FACTORY_DEFAULTS;
mbox_cmd[3] = region;
mbox_cmd[4] = field0;
mbox_cmd[5] = field1;
status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0],
&mbox_sts[0]);
if (status != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
"status %04X\n", ha->host_no, __func__,
mbox_sts[0]));
}
return status;
}
| gpl-2.0 |
DirtyUnicorns-Ports/android_kernel_samsung_jf | sound/soc/au1x/db1000.c | 5076 | 1548 | /*
* DB1000/DB1500/DB1100 ASoC audio fabric support code.
*
* (c) 2011 Manuel Lauss <manuel.lauss@googlemail.com>
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-db1x00/bcsr.h>
#include "psc.h"
static struct snd_soc_dai_link db1000_ac97_dai = {
.name = "AC97",
.stream_name = "AC97 HiFi",
.codec_dai_name = "ac97-hifi",
.cpu_dai_name = "alchemy-ac97c",
.platform_name = "alchemy-pcm-dma.0",
.codec_name = "ac97-codec",
};
static struct snd_soc_card db1000_ac97 = {
.name = "DB1000_AC97",
.owner = THIS_MODULE,
.dai_link = &db1000_ac97_dai,
.num_links = 1,
};
static int __devinit db1000_audio_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &db1000_ac97;
card->dev = &pdev->dev;
return snd_soc_register_card(card);
}
static int __devexit db1000_audio_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
snd_soc_unregister_card(card);
return 0;
}
static struct platform_driver db1000_audio_driver = {
.driver = {
.name = "db1000-audio",
.owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = db1000_audio_probe,
.remove = __devexit_p(db1000_audio_remove),
};
module_platform_driver(db1000_audio_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DB1000/DB1500/DB1100 ASoC audio");
MODULE_AUTHOR("Manuel Lauss");
| gpl-2.0 |
FenomenalSabderMOD/MOTOE | sound/soc/au1x/db1000.c | 5076 | 1548 | /*
* DB1000/DB1500/DB1100 ASoC audio fabric support code.
*
* (c) 2011 Manuel Lauss <manuel.lauss@googlemail.com>
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-db1x00/bcsr.h>
#include "psc.h"
static struct snd_soc_dai_link db1000_ac97_dai = {
.name = "AC97",
.stream_name = "AC97 HiFi",
.codec_dai_name = "ac97-hifi",
.cpu_dai_name = "alchemy-ac97c",
.platform_name = "alchemy-pcm-dma.0",
.codec_name = "ac97-codec",
};
static struct snd_soc_card db1000_ac97 = {
.name = "DB1000_AC97",
.owner = THIS_MODULE,
.dai_link = &db1000_ac97_dai,
.num_links = 1,
};
static int __devinit db1000_audio_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &db1000_ac97;
card->dev = &pdev->dev;
return snd_soc_register_card(card);
}
static int __devexit db1000_audio_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
snd_soc_unregister_card(card);
return 0;
}
static struct platform_driver db1000_audio_driver = {
.driver = {
.name = "db1000-audio",
.owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = db1000_audio_probe,
.remove = __devexit_p(db1000_audio_remove),
};
module_platform_driver(db1000_audio_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DB1000/DB1500/DB1100 ASoC audio");
MODULE_AUTHOR("Manuel Lauss");
| gpl-2.0 |
jrior001/android_kernel_oneplus_msm8974 | sound/soc/blackfin/bf5xx-tdm-pcm.c | 5076 | 9211 | /*
* File: sound/soc/blackfin/bf5xx-tdm-pcm.c
* Author: Barry Song <Barry.Song@analog.com>
*
* Created: Tue June 06 2009
* Description: DMA driver for tdm codec
*
* Modified:
* Copyright 2009 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <asm/dma.h>
#include "bf5xx-tdm-pcm.h"
#include "bf5xx-tdm.h"
#include "bf5xx-sport.h"
#define PCM_BUFFER_MAX 0x8000
#define FRAGMENT_SIZE_MIN (4*1024)
#define FRAGMENTS_MIN 2
#define FRAGMENTS_MAX 32
static void bf5xx_dma_irq(void *data)
{
struct snd_pcm_substream *pcm = data;
snd_pcm_period_elapsed(pcm);
}
static const struct snd_pcm_hardware bf5xx_pcm_hardware = {
.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_RESUME),
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 8,
.buffer_bytes_max = PCM_BUFFER_MAX,
.period_bytes_min = FRAGMENT_SIZE_MIN,
.period_bytes_max = PCM_BUFFER_MAX/2,
.periods_min = FRAGMENTS_MIN,
.periods_max = FRAGMENTS_MAX,
};
static int bf5xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
snd_pcm_lib_malloc_pages(substream, size * 4);
return 0;
}
static int bf5xx_pcm_hw_free(struct snd_pcm_substream *substream)
{
snd_pcm_lib_free_pages(substream);
return 0;
}
static int bf5xx_pcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sport_device *sport = runtime->private_data;
int fragsize_bytes = frames_to_bytes(runtime, runtime->period_size);
fragsize_bytes /= runtime->channels;
/* inflate the fragsize to match the dma width of SPORT */
fragsize_bytes *= 8;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
sport_set_tx_callback(sport, bf5xx_dma_irq, substream);
sport_config_tx_dma(sport, runtime->dma_area,
runtime->periods, fragsize_bytes);
} else {
sport_set_rx_callback(sport, bf5xx_dma_irq, substream);
sport_config_rx_dma(sport, runtime->dma_area,
runtime->periods, fragsize_bytes);
}
return 0;
}
static int bf5xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sport_device *sport = runtime->private_data;
int ret = 0;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
sport_tx_start(sport);
else
sport_rx_start(sport);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
sport_tx_stop(sport);
else
sport_rx_stop(sport);
break;
default:
ret = -EINVAL;
}
return ret;
}
static snd_pcm_uframes_t bf5xx_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sport_device *sport = runtime->private_data;
unsigned int diff;
snd_pcm_uframes_t frames;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
diff = sport_curr_offset_tx(sport);
frames = diff / (8*4); /* 32 bytes per frame */
} else {
diff = sport_curr_offset_rx(sport);
frames = diff / (8*4);
}
return frames;
}
static int bf5xx_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct sport_device *sport_handle = snd_soc_dai_get_drvdata(cpu_dai);
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_dma_buffer *buf = &substream->dma_buffer;
int ret = 0;
snd_soc_set_runtime_hwparams(substream, &bf5xx_pcm_hardware);
ret = snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
if (ret < 0)
goto out;
if (sport_handle != NULL) {
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
sport_handle->tx_buf = buf->area;
else
sport_handle->rx_buf = buf->area;
runtime->private_data = sport_handle;
} else {
pr_err("sport_handle is NULL\n");
ret = -ENODEV;
}
out:
return ret;
}
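/* The DMA buffer is laid out as 8-slot TDM frames of 32-bit words. The
* copy() callback scatters playback samples into their TDM slots via
* tx_map[] and gathers capture samples from rx_map[], advancing one full
* frame (8 words) per PCM frame. */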
static int bf5xx_pcm_copy(struct snd_pcm_substream *substream, int channel,
snd_pcm_uframes_t pos, void *buf, snd_pcm_uframes_t count)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sport_device *sport = runtime->private_data;
struct bf5xx_tdm_port *tdm_port = sport->private_data;
unsigned int *src;
unsigned int *dst;
int i;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
src = buf;
dst = (unsigned int *)substream->runtime->dma_area;
dst += pos * 8;
while (count--) {
for (i = 0; i < substream->runtime->channels; i++)
*(dst + tdm_port->tx_map[i]) = *src++;
dst += 8;
}
} else {
src = (unsigned int *)substream->runtime->dma_area;
dst = buf;
src += pos * 8;
while (count--) {
for (i = 0; i < substream->runtime->channels; i++)
*dst++ = *(src + tdm_port->rx_map[i]);
src += 8;
}
}
return 0;
}
static int bf5xx_pcm_silence(struct snd_pcm_substream *substream,
int channel, snd_pcm_uframes_t pos, snd_pcm_uframes_t count)
{
unsigned char *buf = substream->runtime->dma_area;
buf += pos * 8 * 4;
memset(buf, '\0', count * 8 * 4);
return 0;
}
struct snd_pcm_ops bf5xx_pcm_tdm_ops = {
.open = bf5xx_pcm_open,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = bf5xx_pcm_hw_params,
.hw_free = bf5xx_pcm_hw_free,
.prepare = bf5xx_pcm_prepare,
.trigger = bf5xx_pcm_trigger,
.pointer = bf5xx_pcm_pointer,
.copy = bf5xx_pcm_copy,
.silence = bf5xx_pcm_silence,
};
static int bf5xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
struct snd_pcm_substream *substream = pcm->streams[stream].substream;
struct snd_dma_buffer *buf = &substream->dma_buffer;
size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
buf->dev.type = SNDRV_DMA_TYPE_DEV;
buf->dev.dev = pcm->card->dev;
buf->private_data = NULL;
buf->area = dma_alloc_coherent(pcm->card->dev, size * 4,
&buf->addr, GFP_KERNEL);
if (!buf->area) {
pr_err("Failed to allocate dma memory - Please increase uncached DMA memory region\n");
return -ENOMEM;
}
buf->bytes = size;
return 0;
}
static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
int stream;
for (stream = 0; stream < 2; stream++) {
substream = pcm->streams[stream].substream;
if (!substream)
continue;
buf = &substream->dma_buffer;
if (!buf->area)
continue;
dma_free_coherent(NULL, buf->bytes, buf->area, 0);
buf->area = NULL;
}
}
static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
static int bf5xx_pcm_tdm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
int ret = 0;
if (!card->dev->dma_mask)
card->dev->dma_mask = &bf5xx_pcm_dmamask;
if (!card->dev->coherent_dma_mask)
card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_PLAYBACK);
if (ret)
goto out;
}
if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_CAPTURE);
if (ret)
goto out;
}
out:
return ret;
}
static struct snd_soc_platform_driver bf5xx_tdm_soc_platform = {
.ops = &bf5xx_pcm_tdm_ops,
.pcm_new = bf5xx_pcm_tdm_new,
.pcm_free = bf5xx_pcm_free_dma_buffers,
};
static int __devinit bf5xx_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &bf5xx_tdm_soc_platform);
}
static int __devexit bf5xx_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
}
static struct platform_driver bfin_tdm_driver = {
.driver = {
.name = "bfin-tdm-pcm-audio",
.owner = THIS_MODULE,
},
.probe = bf5xx_soc_platform_probe,
.remove = __devexit_p(bf5xx_soc_platform_remove),
};
module_platform_driver(bfin_tdm_driver);
MODULE_AUTHOR("Barry Song");
MODULE_DESCRIPTION("ADI Blackfin TDM PCM DMA module");
MODULE_LICENSE("GPL");
| gpl-2.0 |
SlimRoms/kernel_sony_msm8974 | sound/soc/au1x/db1000.c | 5076 | 1548 | /*
* DB1000/DB1500/DB1100 ASoC audio fabric support code.
*
* (c) 2011 Manuel Lauss <manuel.lauss@googlemail.com>
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-db1x00/bcsr.h>
#include "psc.h"
static struct snd_soc_dai_link db1000_ac97_dai = {
.name = "AC97",
.stream_name = "AC97 HiFi",
.codec_dai_name = "ac97-hifi",
.cpu_dai_name = "alchemy-ac97c",
.platform_name = "alchemy-pcm-dma.0",
.codec_name = "ac97-codec",
};
static struct snd_soc_card db1000_ac97 = {
.name = "DB1000_AC97",
.owner = THIS_MODULE,
.dai_link = &db1000_ac97_dai,
.num_links = 1,
};
static int __devinit db1000_audio_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &db1000_ac97;
card->dev = &pdev->dev;
return snd_soc_register_card(card);
}
static int __devexit db1000_audio_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
snd_soc_unregister_card(card);
return 0;
}
static struct platform_driver db1000_audio_driver = {
.driver = {
.name = "db1000-audio",
.owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = db1000_audio_probe,
.remove = __devexit_p(db1000_audio_remove),
};
module_platform_driver(db1000_audio_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DB1000/DB1500/DB1100 ASoC audio");
MODULE_AUTHOR("Manuel Lauss");
| gpl-2.0 |
emceethemouth/kernel_ghost | arch/sparc/kernel/iommu.c | 6868 | 21361 | /* iommu.c: Generic sparc64 IOMMU support.
*
* Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif
#include <asm/iommu.h>
#include "iommu_common.h"
#define STC_CTXMATCH_ADDR(STC, CTX) \
((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
(*((STC)->strbuf_flushflag) != 0UL)
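/* Raw register accessors: loads/stores through ASI_PHYS_BYPASS_EC_E so the
* IOMMU and streaming-buffer control registers are accessed by physical
* address, bypassing the external cache. */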
#define iommu_read(__reg) \
({ u64 __ret; \
__asm__ __volatile__("ldxa [%1] %2, %0" \
: "=r" (__ret) \
: "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
: "memory"); \
__ret; \
})
#define iommu_write(__reg, __val) \
__asm__ __volatile__("stxa %0, [%1] %2" \
: /* no outputs */ \
: "r" (__val), "r" (__reg), \
"i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
if (iommu->iommu_flushinv) {
iommu_write(iommu->iommu_flushinv, ~(u64)0);
} else {
unsigned long tag;
int entry;
tag = iommu->iommu_tags;
for (entry = 0; entry < 16; entry++) {
iommu_write(tag, 0);
tag += 8;
}
/* Ensure completion of previous PIO writes. */
(void) iommu_read(iommu->write_complete_reg);
}
}
#define IOPTE_CONSISTENT(CTX) \
(IOPTE_VALID | IOPTE_CACHE | \
(((CTX) << 47) & IOPTE_CONTEXT))
#define IOPTE_STREAMING(CTX) \
(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
/* Existing mappings are never marked invalid, instead they
* are pointed to a dummy page.
*/
#define IOPTE_IS_DUMMY(iommu, iopte) \
((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
unsigned long val = iopte_val(*iopte);
val &= ~IOPTE_PAGE;
val |= iommu->dummy_page_pa;
iopte_val(*iopte) = val;
}
/* Based almost entirely upon the ppc64 iommu allocator. If you use the 'handle'
* facility it must all be done in one pass while under the iommu lock.
*
* On sun4u platforms, we only flush the IOMMU once every time we've passed
* over the entire page table doing allocations. Therefore we only ever advance
* the hint and cannot backtrack it.
*/
unsigned long iommu_range_alloc(struct device *dev,
struct iommu *iommu,
unsigned long npages,
unsigned long *handle)
{
unsigned long n, end, start, limit, boundary_size;
struct iommu_arena *arena = &iommu->arena;
int pass = 0;
/* This allocator was derived from x86_64's bit string search */
/* Sanity check */
if (unlikely(npages == 0)) {
if (printk_ratelimit())
WARN_ON(1);
return DMA_ERROR_CODE;
}
if (handle && *handle)
start = *handle;
else
start = arena->hint;
limit = arena->limit;
/* The case below can happen if we have a small segment appended
* to a large one, or when the previous alloc was at the very end of
* the available space. If so, go back to the beginning and flush.
*/
if (start >= limit) {
start = 0;
if (iommu->flush_all)
iommu->flush_all(iommu);
}
again:
if (dev)
boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
1 << IO_PAGE_SHIFT);
else
boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
n = iommu_area_alloc(arena->map, limit, start, npages,
iommu->page_table_map_base >> IO_PAGE_SHIFT,
boundary_size >> IO_PAGE_SHIFT, 0);
if (n == -1) {
if (likely(pass < 1)) {
/* First failure, rescan from the beginning. */
start = 0;
if (iommu->flush_all)
iommu->flush_all(iommu);
pass++;
goto again;
} else {
/* Second failure, give up */
return DMA_ERROR_CODE;
}
}
end = n + npages;
arena->hint = end;
/* Update handle for SG allocations */
if (handle)
*handle = end;
return n;
}
void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
struct iommu_arena *arena = &iommu->arena;
unsigned long entry;
entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
bitmap_clear(arena->map, entry, npages);
}
int iommu_table_init(struct iommu *iommu, int tsbsize,
u32 dma_offset, u32 dma_addr_mask,
int numa_node)
{
unsigned long i, order, sz, num_tsb_entries;
struct page *page;
num_tsb_entries = tsbsize / sizeof(iopte_t);
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
iommu->page_table_map_base = dma_offset;
iommu->dma_addr_mask = dma_addr_mask;
/* Allocate and initialize the free area map. */
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
if (!iommu->arena.map) {
printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
return -ENOMEM;
}
memset(iommu->arena.map, 0, sz);
iommu->arena.limit = num_tsb_entries;
if (tlb_type != hypervisor)
iommu->flush_all = iommu_flushall;
/* Allocate and initialize the dummy page which we
* set inactive IO PTEs to point to.
*/
page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
if (!page) {
printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
goto out_free_map;
}
iommu->dummy_page = (unsigned long) page_address(page);
memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
/* Now allocate and setup the IOMMU page table itself. */
order = get_order(tsbsize);
page = alloc_pages_node(numa_node, GFP_KERNEL, order);
if (!page) {
printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
goto out_free_dummy_page;
}
iommu->page_table = (iopte_t *)page_address(page);
for (i = 0; i < num_tsb_entries; i++)
iopte_make_dummy(iommu, &iommu->page_table[i]);
return 0;
out_free_dummy_page:
free_page(iommu->dummy_page);
iommu->dummy_page = 0UL;
out_free_map:
kfree(iommu->arena.map);
iommu->arena.map = NULL;
return -ENOMEM;
}
static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
unsigned long npages)
{
unsigned long entry;
entry = iommu_range_alloc(dev, iommu, npages, NULL);
if (unlikely(entry == DMA_ERROR_CODE))
return NULL;
return iommu->page_table + entry;
}
static int iommu_alloc_ctx(struct iommu *iommu)
{
int lowest = iommu->ctx_lowest_free;
int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
if (unlikely(n == IOMMU_NUM_CTXS)) {
n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
if (unlikely(n == lowest)) {
printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
n = 0;
}
}
if (n)
__set_bit(n, iommu->ctx_bitmap);
return n;
}
static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
if (likely(ctx)) {
__clear_bit(ctx, iommu->ctx_bitmap);
if (ctx < iommu->ctx_lowest_free)
iommu->ctx_lowest_free = ctx;
}
}
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
struct dma_attrs *attrs)
{
unsigned long flags, order, first_page;
struct iommu *iommu;
struct page *page;
int npages, nid;
iopte_t *iopte;
void *ret;
size = IO_PAGE_ALIGN(size);
order = get_order(size);
if (order >= 10)
return NULL;
nid = dev->archdata.numa_node;
page = alloc_pages_node(nid, gfp, order);
if (unlikely(!page))
return NULL;
first_page = (unsigned long) page_address(page);
memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu;
spin_lock_irqsave(&iommu->lock, flags);
iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(iopte == NULL)) {
free_pages(first_page, order);
return NULL;
}
*dma_addrp = (iommu->page_table_map_base +
((iopte - iommu->page_table) << IO_PAGE_SHIFT));
ret = (void *) first_page;
npages = size >> IO_PAGE_SHIFT;
first_page = __pa(first_page);
while (npages--) {
iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
IOPTE_WRITE |
(first_page & IOPTE_PAGE));
iopte++;
first_page += IO_PAGE_SIZE;
}
return ret;
}
static void dma_4u_free_coherent(struct device *dev, size_t size,
void *cpu, dma_addr_t dvma,
struct dma_attrs *attrs)
{
struct iommu *iommu;
unsigned long flags, order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
spin_lock_irqsave(&iommu->lock, flags);
iommu_range_free(iommu, dvma, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
}
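/* Map a single page for streaming DMA: allocate IOPTEs (and a flush
* context when the IOMMU supports context flushing), then program them
* with streaming or consistent protection bits depending on whether the
* streaming buffer is enabled. */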
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t sz,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, oaddr;
unsigned long i, base_paddr, ctx;
u32 bus_addr, ret;
unsigned long iopte_protection;
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (unlikely(direction == DMA_NONE))
goto bad_no_ctx;
oaddr = (unsigned long)(page_address(page) + offset);
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(dev, iommu, npages);
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu_alloc_ctx(iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(!base))
goto bad;
bus_addr = (iommu->page_table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
iopte_protection = IOPTE_CONSISTENT(ctx);
if (direction != DMA_TO_DEVICE)
iopte_protection |= IOPTE_WRITE;
for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
iopte_val(*base) = iopte_protection | base_paddr;
return ret;
bad:
iommu_free_ctx(iommu, ctx);
bad_no_ctx:
if (printk_ratelimit())
WARN_ON(1);
return DMA_ERROR_CODE;
}
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
u32 vaddr, unsigned long ctx, unsigned long npages,
enum dma_data_direction direction)
{
int limit;
if (strbuf->strbuf_ctxflush &&
iommu->iommu_ctxflush) {
unsigned long matchreg, flushreg;
u64 val;
flushreg = strbuf->strbuf_ctxflush;
matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
iommu_write(flushreg, ctx);
val = iommu_read(matchreg);
val &= 0xffff;
if (!val)
goto do_flush_sync;
while (val) {
if (val & 0x1)
iommu_write(flushreg, ctx);
val >>= 1;
}
val = iommu_read(matchreg);
if (unlikely(val)) {
printk(KERN_WARNING "strbuf_flush: ctx flush "
"timeout matchreg[%llx] ctx[%lx]\n",
val, ctx);
goto do_page_flush;
}
} else {
unsigned long i;
do_page_flush:
for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
iommu_write(strbuf->strbuf_pflush, vaddr);
}
do_flush_sync:
/* If the device could not have possibly put dirty data into
* the streaming cache, no flush-flag synchronization needs
* to be performed.
*/
if (direction == DMA_TO_DEVICE)
return;
STC_FLUSHFLAG_INIT(strbuf);
iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
(void) iommu_read(iommu->write_complete_reg);
limit = 100000;
while (!STC_FLUSHFLAG_SET(strbuf)) {
limit--;
if (!limit)
break;
udelay(1);
rmb();
}
if (!limit)
printk(KERN_WARNING "strbuf_flush: flushflag timeout "
"vaddr[%08x] ctx[%lx] npages[%ld]\n",
vaddr, ctx, npages);
}
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, ctx, i;
if (unlikely(direction == DMA_NONE)) {
if (printk_ratelimit())
WARN_ON(1);
return;
}
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
base = iommu->page_table +
((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
bus_addr &= IO_PAGE_MASK;
spin_lock_irqsave(&iommu->lock, flags);
/* Record the context, if any. */
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
/* Step 1: Kick data out of streaming buffers if necessary. */
if (strbuf->strbuf_enabled)
strbuf_flush(strbuf, iommu, bus_addr, ctx,
npages, direction);
/* Step 2: Clear out TSB entries. */
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
iommu_range_free(iommu, bus_addr, npages);
iommu_free_ctx(iommu, ctx);
spin_unlock_irqrestore(&iommu->lock, flags);
}
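/* Map a scatterlist, coalescing entries whose DMA addresses come out
* contiguous as long as the merged segment stays within the device's
* max segment size and does not cross its segment boundary. */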
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot, ctx;
dma_addr_t dma_next = 0, dma_addr;
unsigned int max_seg_size;
unsigned long seg_boundary_size;
int outcount, incount, i;
struct strbuf *strbuf;
struct iommu *iommu;
unsigned long base_shift;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (nelems == 0 || !iommu)
return 0;
spin_lock_irqsave(&iommu->lock, flags);
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu_alloc_ctx(iommu);
if (strbuf->strbuf_enabled)
prot = IOPTE_STREAMING(ctx);
else
prot = IOPTE_CONSISTENT(ctx);
if (direction != DMA_TO_DEVICE)
prot |= IOPTE_WRITE;
outs = s = segstart = &sglist[0];
outcount = 1;
incount = nelems;
handle = 0;
/* Init first segment length for backout at failure */
outs->dma_length = 0;
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
iopte_t *base;
slen = s->length;
/* Sanity check */
if (slen == 0) {
dma_next = 0;
continue;
}
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
entry = iommu_range_alloc(dev, iommu, npages, &handle);
/* Handle failure */
if (unlikely(entry == DMA_ERROR_CODE)) {
if (printk_ratelimit())
printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
" npages %lx\n", iommu, paddr, npages);
goto iommu_map_failed;
}
base = iommu->page_table + entry;
/* Convert entry to a dma_addr_t */
dma_addr = iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
paddr &= IO_PAGE_MASK;
while (npages--) {
iopte_val(*base) = prot | paddr;
base++;
paddr += IO_PAGE_SIZE;
}
/* If we are in an open segment, try merging */
if (segstart != s) {
/* We cannot merge if:
* - allocated dma_addr isn't contiguous to previous allocation
*/
if ((dma_addr != dma_next) ||
(outs->dma_length + s->length > max_seg_size) ||
(is_span_boundary(out_entry, base_shift,
seg_boundary_size, outs, s))) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
outs = sg_next(outs);
} else {
outs->dma_length += s->length;
}
}
if (segstart == s) {
/* This is a new segment, fill entries */
outs->dma_address = dma_addr;
outs->dma_length = slen;
out_entry = entry;
}
/* Calculate next page pointer for contiguous check */
dma_next = dma_addr + slen;
}
spin_unlock_irqrestore(&iommu->lock, flags);
if (outcount < incount) {
outs = sg_next(outs);
outs->dma_address = DMA_ERROR_CODE;
outs->dma_length = 0;
}
return outcount;
iommu_map_failed:
for_each_sg(sglist, s, nelems, i) {
if (s->dma_length != 0) {
unsigned long vaddr, npages, entry, j;
iopte_t *base;
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
iommu_range_free(iommu, vaddr, npages);
entry = (vaddr - iommu->page_table_map_base)
>> IO_PAGE_SHIFT;
base = iommu->page_table + entry;
for (j = 0; j < npages; j++)
iopte_make_dummy(iommu, base + j);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
}
if (s == outs)
break;
}
spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
/* If contexts are being used, they are the same in all of the mappings
* we make for a particular SG.
*/
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
unsigned long ctx = 0;
if (iommu->iommu_ctxflush) {
iopte_t *base;
u32 bus_addr;
bus_addr = sg->dma_address & IO_PAGE_MASK;
base = iommu->page_table +
((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
}
return ctx;
}
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
unsigned long flags, ctx;
struct scatterlist *sg;
struct strbuf *strbuf;
struct iommu *iommu;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
ctx = fetch_sg_ctx(iommu, sglist);
spin_lock_irqsave(&iommu->lock, flags);
sg = sglist;
while (nelems--) {
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
unsigned long npages, entry;
iopte_t *base;
int i;
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
iommu_range_free(iommu, dma_handle, npages);
entry = ((dma_handle - iommu->page_table_map_base)
>> IO_PAGE_SHIFT);
base = iommu->page_table + entry;
dma_handle &= IO_PAGE_MASK;
if (strbuf->strbuf_enabled)
strbuf_flush(strbuf, iommu, dma_handle, ctx,
npages, direction);
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
sg = sg_next(sg);
}
iommu_free_ctx(iommu, ctx);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static void dma_4u_sync_single_for_cpu(struct device *dev,
dma_addr_t bus_addr, size_t sz,
enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long flags, ctx, npages;
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (!strbuf->strbuf_enabled)
return;
spin_lock_irqsave(&iommu->lock, flags);
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
/* Step 1: Record the context, if any. */
ctx = 0;
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
iopte = iommu->page_table +
((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
/* Step 2: Kick data out of streaming buffers. */
strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static void dma_4u_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sglist, int nelems,
enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long flags, ctx, npages, i;
struct scatterlist *sg, *sgprv;
u32 bus_addr;
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (!strbuf->strbuf_enabled)
return;
spin_lock_irqsave(&iommu->lock, flags);
/* Step 1: Record the context, if any. */
ctx = 0;
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
iopte = iommu->page_table +
((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
/* Step 2: Kick data out of streaming buffers. */
bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
sgprv = NULL;
for_each_sg(sglist, sg, nelems, i) {
if (sg->dma_length == 0)
break;
sgprv = sg;
}
npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
- bus_addr) >> IO_PAGE_SHIFT;
strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct dma_map_ops sun4u_dma_ops = {
.alloc = dma_4u_alloc_coherent,
.free = dma_4u_free_coherent,
.map_page = dma_4u_map_page,
.unmap_page = dma_4u_unmap_page,
.map_sg = dma_4u_map_sg,
.unmap_sg = dma_4u_unmap_sg,
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
};
struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
int dma_supported(struct device *dev, u64 device_mask)
{
struct iommu *iommu = dev->archdata.iommu;
u64 dma_addr_mask = iommu->dma_addr_mask;
if (device_mask >= (1UL << 32UL))
return 0;
if ((device_mask & dma_addr_mask) == dma_addr_mask)
return 1;
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif
return 0;
}
EXPORT_SYMBOL(dma_supported);
| gpl-2.0 |
Psycho666/Simplicity_trlte_kernel | drivers/media/rc/keymaps/rc-apac-viewcomp.c | 7636 | 2143 | /* apac-viewcomp.h - Keytable for apac_viewcomp Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Attila Kondoros <attila.kondoros@chello.hu> */
static struct rc_map_table apac_viewcomp[] = {
{ 0x01, KEY_1 },
{ 0x02, KEY_2 },
{ 0x03, KEY_3 },
{ 0x04, KEY_4 },
{ 0x05, KEY_5 },
{ 0x06, KEY_6 },
{ 0x07, KEY_7 },
{ 0x08, KEY_8 },
{ 0x09, KEY_9 },
{ 0x00, KEY_0 },
{ 0x17, KEY_LAST }, /* +100 */
{ 0x0a, KEY_LIST }, /* recall */
{ 0x1c, KEY_TUNER }, /* TV/FM */
{ 0x15, KEY_SEARCH }, /* scan */
{ 0x12, KEY_POWER }, /* power */
{ 0x1f, KEY_VOLUMEDOWN }, /* vol up */
{ 0x1b, KEY_VOLUMEUP }, /* vol down */
{ 0x1e, KEY_CHANNELDOWN }, /* chn up */
{ 0x1a, KEY_CHANNELUP }, /* chn down */
{ 0x11, KEY_VIDEO }, /* video */
{ 0x0f, KEY_ZOOM }, /* full screen */
{ 0x13, KEY_MUTE }, /* mute/unmute */
{ 0x10, KEY_TEXT }, /* min */
{ 0x0d, KEY_STOP }, /* freeze */
{ 0x0e, KEY_RECORD }, /* record */
{ 0x1d, KEY_PLAYPAUSE }, /* stop */
{ 0x19, KEY_PLAY }, /* play */
{ 0x16, KEY_GOTO }, /* osd */
{ 0x14, KEY_REFRESH }, /* default */
{ 0x0c, KEY_KPPLUS }, /* fine tune >>>> */
{ 0x18, KEY_KPMINUS }, /* fine tune <<<< */
};
static struct rc_map_list apac_viewcomp_map = {
.map = {
.scan = apac_viewcomp,
.size = ARRAY_SIZE(apac_viewcomp),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_APAC_VIEWCOMP,
}
};
static int __init init_rc_map_apac_viewcomp(void)
{
return rc_map_register(&apac_viewcomp_map);
}
static void __exit exit_rc_map_apac_viewcomp(void)
{
rc_map_unregister(&apac_viewcomp_map);
}
module_init(init_rc_map_apac_viewcomp)
module_exit(exit_rc_map_apac_viewcomp)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
dianlujitao/android_kernel_huawei_msm8610 | drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c | 7636 | 1897 | /* pinnacle-pctv-hd.h - Keytable for pinnacle_pctv_hd Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Pinnacle PCTV HD 800i mini remote */
static struct rc_map_table pinnacle_pctv_hd[] = {
/* Key codes for the tiny Pinnacle remote*/
{ 0x0700, KEY_MUTE },
{ 0x0701, KEY_MENU }, /* Pinnacle logo */
{ 0x0739, KEY_POWER },
{ 0x0703, KEY_VOLUMEUP },
{ 0x0705, KEY_OK },
{ 0x0709, KEY_VOLUMEDOWN },
{ 0x0706, KEY_CHANNELUP },
{ 0x070c, KEY_CHANNELDOWN },
{ 0x070f, KEY_1 },
{ 0x0715, KEY_2 },
{ 0x0710, KEY_3 },
{ 0x0718, KEY_4 },
{ 0x071b, KEY_5 },
{ 0x071e, KEY_6 },
{ 0x0711, KEY_7 },
{ 0x0721, KEY_8 },
{ 0x0712, KEY_9 },
{ 0x0727, KEY_0 },
{ 0x0724, KEY_ZOOM }, /* 'Square' key */
{ 0x072a, KEY_SUBTITLE }, /* 'T' key */
{ 0x072d, KEY_REWIND },
{ 0x0730, KEY_PLAYPAUSE },
{ 0x0733, KEY_FASTFORWARD },
{ 0x0736, KEY_RECORD },
{ 0x073c, KEY_STOP },
{ 0x073f, KEY_HELP }, /* '?' key */
};
static struct rc_map_list pinnacle_pctv_hd_map = {
.map = {
.scan = pinnacle_pctv_hd,
.size = ARRAY_SIZE(pinnacle_pctv_hd),
.rc_type = RC_TYPE_RC5,
.name = RC_MAP_PINNACLE_PCTV_HD,
}
};
static int __init init_rc_map_pinnacle_pctv_hd(void)
{
return rc_map_register(&pinnacle_pctv_hd_map);
}
static void __exit exit_rc_map_pinnacle_pctv_hd(void)
{
rc_map_unregister(&pinnacle_pctv_hd_map);
}
module_init(init_rc_map_pinnacle_pctv_hd)
module_exit(exit_rc_map_pinnacle_pctv_hd)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
RockchipOpensourceCommunity/px2-android-kernel-3.0 | drivers/staging/comedi/drivers/aio_aio12_8.c | 8148 | 6077 | /*
comedi/drivers/aio_aio12_8.c
Driver for Access I/O Products PC-104 AIO12-8 Analog I/O Board
Copyright (C) 2006 C&C Technologies, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: aio_aio12_8
Description: Access I/O Products PC-104 AIO12-8 Analog I/O Board
Author: Pablo Mejia <pablo.mejia@cctechnol.com>
Devices:
[Access I/O] PC-104 AIO12-8
Status: experimental
Configuration Options:
[0] - I/O port base address
Notes:
Only synchronous operations are supported.
*/
#include "../comedidev.h"
#include <linux/ioport.h>
#include "8255.h"
#define AIO12_8_STATUS 0x00
#define AIO12_8_INTERRUPT 0x01
#define AIO12_8_ADC 0x02
#define AIO12_8_DAC_0 0x04
#define AIO12_8_DAC_1 0x06
#define AIO12_8_DAC_2 0x08
#define AIO12_8_DAC_3 0x0A
#define AIO12_8_COUNTER_0 0x0C
#define AIO12_8_COUNTER_1 0x0D
#define AIO12_8_COUNTER_2 0x0E
#define AIO12_8_COUNTER_CONTROL 0x0F
#define AIO12_8_DIO_0 0x10
#define AIO12_8_DIO_1 0x11
#define AIO12_8_DIO_2 0x12
#define AIO12_8_DIO_STATUS 0x13
#define AIO12_8_DIO_CONTROL 0x14
#define AIO12_8_ADC_TRIGGER_CONTROL 0x15
#define AIO12_8_TRIGGER 0x16
#define AIO12_8_POWER 0x17
#define STATUS_ADC_EOC 0x80
#define ADC_MODE_NORMAL 0x00
#define ADC_MODE_INTERNAL_CLOCK 0x40
#define ADC_MODE_STANDBY 0x80
#define ADC_MODE_POWERDOWN 0xC0
#define DAC_ENABLE 0x18
struct aio12_8_boardtype {
const char *name;
};
static const struct aio12_8_boardtype board_types[] = {
{
.name = "aio_aio12_8"},
};
#define thisboard ((const struct aio12_8_boardtype *) dev->board_ptr)
struct aio12_8_private {
unsigned int ao_readback[4];
};
#define devpriv ((struct aio12_8_private *) dev->private)
static int aio_aio12_8_ai_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int n;
unsigned char control =
ADC_MODE_NORMAL |
(CR_RANGE(insn->chanspec) << 3) | CR_CHAN(insn->chanspec);
/* read status to clear EOC latch */
inb(dev->iobase + AIO12_8_STATUS);
for (n = 0; n < insn->n; n++) {
int timeout = 5;
/* Setup and start conversion */
outb(control, dev->iobase + AIO12_8_ADC);
/* Wait for conversion to complete */
while (timeout &&
!(inb(dev->iobase + AIO12_8_STATUS) & STATUS_ADC_EOC)) {
timeout--;
printk(KERN_ERR "timeout %d\n", timeout);
udelay(1);
}
if (timeout == 0) {
comedi_error(dev, "ADC timeout");
return -EIO;
}
data[n] = inw(dev->iobase + AIO12_8_ADC) & 0x0FFF;
}
return n;
}
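/*
 * Worked example (added for illustration, not part of the original driver):
 * the control byte written to AIO12_8_ADC above encodes mode, range and
 * channel; e.g. range index 1 on channel 2 gives
 *
 * ADC_MODE_NORMAL | (1 << 3) | 2  ==  0x0A
 *
 * and the 12-bit conversion result is then read back from the same register.
 */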
static int aio_aio12_8_ao_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int i;
int val = devpriv->ao_readback[CR_CHAN(insn->chanspec)];
for (i = 0; i < insn->n; i++)
data[i] = val;
return insn->n;
}
static int aio_aio12_8_ao_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int i;
int chan = CR_CHAN(insn->chanspec);
unsigned long port = dev->iobase + AIO12_8_DAC_0 + (2 * chan);
/* enable DACs */
outb(0x01, dev->iobase + DAC_ENABLE);
for (i = 0; i < insn->n; i++) {
outb(data[i] & 0xFF, port); /* LSB */
outb((data[i] >> 8) & 0x0F, port + 1); /* MSB */
devpriv->ao_readback[chan] = data[i];
}
return insn->n;
}
static const struct comedi_lrange range_aio_aio12_8 = {
4,
{
UNI_RANGE(5),
BIP_RANGE(5),
UNI_RANGE(10),
BIP_RANGE(10),
}
};
static int aio_aio12_8_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
int iobase;
struct comedi_subdevice *s;
iobase = it->options[0];
if (!request_region(iobase, 24, "aio_aio12_8")) {
printk(KERN_ERR "I/O port conflict");
return -EIO;
}
dev->board_name = thisboard->name;
dev->iobase = iobase;
if (alloc_private(dev, sizeof(struct aio12_8_private)) < 0)
return -ENOMEM;
if (alloc_subdevices(dev, 3) < 0)
return -ENOMEM;
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
s->n_chan = 8;
s->maxdata = (1 << 12) - 1;
s->range_table = &range_aio_aio12_8;
s->insn_read = aio_aio12_8_ai_read;
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_DIFF;
s->n_chan = 4;
s->maxdata = (1 << 12) - 1;
s->range_table = &range_aio_aio12_8;
s->insn_read = aio_aio12_8_ao_read;
s->insn_write = aio_aio12_8_ao_write;
s = &dev->subdevices[2];
subdev_8255_init(dev, s, NULL, dev->iobase + AIO12_8_DIO_0);
return 0;
}
static int aio_aio12_8_detach(struct comedi_device *dev)
{
subdev_8255_cleanup(dev, &dev->subdevices[2]);
if (dev->iobase)
release_region(dev->iobase, 24);
return 0;
}
static struct comedi_driver driver_aio_aio12_8 = {
.driver_name = "aio_aio12_8",
.module = THIS_MODULE,
.attach = aio_aio12_8_attach,
.detach = aio_aio12_8_detach,
.board_name = &board_types[0].name,
.num_names = 1,
.offset = sizeof(struct aio12_8_boardtype),
};
static int __init driver_aio_aio12_8_init_module(void)
{
return comedi_driver_register(&driver_aio_aio12_8);
}
static void __exit driver_aio_aio12_8_cleanup_module(void)
{
comedi_driver_unregister(&driver_aio_aio12_8);
}
module_init(driver_aio_aio12_8_init_module);
module_exit(driver_aio_aio12_8_cleanup_module);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
MoKee/android_kernel_zte_x9180 | drivers/media/radio/radio-terratec.c | 9684 | 4471 | /* Terratec ActiveRadio ISA Standalone card driver for Linux radio support
* (c) 1999 R. Offermanns (rolf@offermanns.de)
* based on the aimslab radio driver from M. Kirkwood
* many thanks to Michael Becker and Friedhelm Birth (from TerraTec)
*
*
* History:
* 1999-05-21 First preview release
*
* Notes on the hardware:
* There are two "main" chips on the card:
* - Philips OM5610 (http://www-us.semiconductors.philips.com/acrobat/datasheets/OM5610_2.pdf)
* - Philips SAA6588 (http://www-us.semiconductors.philips.com/acrobat/datasheets/SAA6588_1.pdf)
* (you can get the datasheet at the above links)
*
* Frequency control is done digitally -- ie out(port,encodefreq(95.8));
* Volume Control is done digitally
*
* Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com>
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
*/
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
#include <linux/ioport.h> /* request_region */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/mutex.h>
#include <linux/io.h> /* outb, outb_p */
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include "radio-isa.h"
MODULE_AUTHOR("R. Offermans & others");
MODULE_DESCRIPTION("A driver for the TerraTec ActiveRadio Standalone radio card.");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1.99");
/* Note: there seems to be only one possible port (0x590), but without
hardware this is hard to verify. For now, this is the only one we will
support. */
static int io = 0x590;
static int radio_nr = -1;
module_param(radio_nr, int, 0444);
MODULE_PARM_DESC(radio_nr, "Radio device number");
#define WRT_DIS 0x00
#define CLK_OFF 0x00
#define IIC_DATA 0x01
#define IIC_CLK 0x02
#define DATA 0x04
#define CLK_ON 0x08
#define WRT_EN 0x10
static struct radio_isa_card *terratec_alloc(void)
{
return kzalloc(sizeof(struct radio_isa_card), GFP_KERNEL);
}
static int terratec_s_mute_volume(struct radio_isa_card *isa, bool mute, int vol)
{
int i;
if (mute)
vol = 0;
vol = vol + (vol * 32); /* change both channels */
for (i = 0; i < 8; i++) {
if (vol & (0x80 >> i))
outb(0x80, isa->io + 1);
else
outb(0x00, isa->io + 1);
}
return 0;
}
/* this is the worst part in this driver */
/* many more or less strange things are going on here, but hey, it works :) */
static int terratec_s_frequency(struct radio_isa_card *isa, u32 freq)
{
int i;
int p;
int temp;
long rest;
unsigned char buffer[25]; /* we have to bit shift 25 registers */
freq = freq / 160; /* convert the freq. to a nice to handle value */
memset(buffer, 0, sizeof(buffer));
rest = freq * 10 + 10700; /* I once had understood what is going on here */
/* maybe some wise guy (friedhelm?) can comment this stuff */
i = 13;
p = 10;
temp = 102400;
while (rest != 0) {
if (rest % temp == rest)
buffer[i] = 0;
else {
buffer[i] = 1;
rest = rest - temp;
}
i--;
p--;
temp = temp / 2;
}
for (i = 24; i > -1; i--) { /* bit shift the values to the radiocard */
if (buffer[i] == 1) {
outb(WRT_EN | DATA, isa->io);
outb(WRT_EN | DATA | CLK_ON, isa->io);
outb(WRT_EN | DATA, isa->io);
} else {
outb(WRT_EN | 0x00, isa->io);
outb(WRT_EN | 0x00 | CLK_ON, isa->io);
}
}
outb(0x00, isa->io);
return 0;
}
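/*
 * Worked example (added for illustration, not in the original driver):
 * for 95.8 MHz the radio-isa core passes freq in 62.5 Hz V4L2 units,
 * i.e. 1532800.  freq / 160 = 9580 (10 kHz units), so
 *
 * rest = 9580 * 10 + 10700 = 106500
 *
 * which is the local-oscillator frequency in kHz (tuned frequency plus the
 * 10.7 MHz IF).  The subtraction loop then sets buffer[13] (weight 102400),
 * buffer[8] (3200), buffer[6] (800) and buffer[3] (100), and the final loop
 * clocks those 25 bits out to the tuner, most significant bit first.
 */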
static u32 terratec_g_signal(struct radio_isa_card *isa)
{
/* bit set = no signal present */
return (inb(isa->io) & 2) ? 0 : 0xffff;
}
static const struct radio_isa_ops terratec_ops = {
.alloc = terratec_alloc,
.s_mute_volume = terratec_s_mute_volume,
.s_frequency = terratec_s_frequency,
.g_signal = terratec_g_signal,
};
static const int terratec_ioports[] = { 0x590 };
static struct radio_isa_driver terratec_driver = {
.driver = {
.match = radio_isa_match,
.probe = radio_isa_probe,
.remove = radio_isa_remove,
.driver = {
.name = "radio-terratec",
},
},
.io_params = &io,
.radio_nr_params = &radio_nr,
.io_ports = terratec_ioports,
.num_of_io_ports = ARRAY_SIZE(terratec_ioports),
.region_size = 2,
.card = "TerraTec ActiveRadio",
.ops = &terratec_ops,
.has_stereo = true,
.max_volume = 10,
};
static int __init terratec_init(void)
{
return isa_register_driver(&terratec_driver.driver, 1);
}
static void __exit terratec_exit(void)
{
isa_unregister_driver(&terratec_driver.driver);
}
module_init(terratec_init);
module_exit(terratec_exit);
| gpl-2.0 |
arnoldthebat/linux-stable | arch/powerpc/boot/ugecon.c | 13268 | 2911 | /*
* arch/powerpc/boot/ugecon.c
*
* USB Gecko bootwrapper console.
* Copyright (C) 2008-2009 The GameCube Linux Team
* Copyright (C) 2008,2009 Albert Herranz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
*/
#include <stddef.h>
#include "stdio.h"
#include "types.h"
#include "io.h"
#include "ops.h"
#define EXI_CLK_32MHZ 5
#define EXI_CSR 0x00
#define EXI_CSR_CLKMASK (0x7<<4)
#define EXI_CSR_CLK_32MHZ (EXI_CLK_32MHZ<<4)
#define EXI_CSR_CSMASK (0x7<<7)
#define EXI_CSR_CS_0 (0x1<<7) /* Chip Select 001 */
#define EXI_CR 0x0c
#define EXI_CR_TSTART (1<<0)
#define EXI_CR_WRITE (1<<2)
#define EXI_CR_READ_WRITE (2<<2)
#define EXI_CR_TLEN(len) (((len)-1)<<4)
#define EXI_DATA 0x10
/* virtual address base for input/output, retrieved from device tree */
static void *ug_io_base;
static u32 ug_io_transaction(u32 in)
{
u32 *csr_reg = ug_io_base + EXI_CSR;
u32 *data_reg = ug_io_base + EXI_DATA;
u32 *cr_reg = ug_io_base + EXI_CR;
u32 csr, data, cr;
/* select */
csr = EXI_CSR_CLK_32MHZ | EXI_CSR_CS_0;
out_be32(csr_reg, csr);
/* read/write */
data = in;
out_be32(data_reg, data);
cr = EXI_CR_TLEN(2) | EXI_CR_READ_WRITE | EXI_CR_TSTART;
out_be32(cr_reg, cr);
while (in_be32(cr_reg) & EXI_CR_TSTART)
barrier();
/* deselect */
out_be32(csr_reg, 0);
data = in_be32(data_reg);
return data;
}
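/*
 * Illustrative note (added, not part of the original file): every exchange
 * with the USB Gecko is a single 2-byte EXI transaction; the command sits in
 * the top bits of the word written to EXI_DATA and the reply comes back in
 * the word read after the transfer, e.g. ug_is_adapter_present() below sends
 * 0x90000000 and expects 0x04700000 (the adapter ID) in return.
 */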
static int ug_is_txfifo_ready(void)
{
return ug_io_transaction(0xc0000000) & 0x04000000;
}
static void ug_raw_putc(char ch)
{
ug_io_transaction(0xb0000000 | (ch << 20));
}
static void ug_putc(char ch)
{
int count = 16;
if (!ug_io_base)
return;
while (!ug_is_txfifo_ready() && count--)
barrier();
if (count >= 0)
ug_raw_putc(ch);
}
void ug_console_write(const char *buf, int len)
{
char *b = (char *)buf;
while (len--) {
if (*b == '\n')
ug_putc('\r');
ug_putc(*b++);
}
}
static int ug_is_adapter_present(void)
{
if (!ug_io_base)
return 0;
return ug_io_transaction(0x90000000) == 0x04700000;
}
static void *ug_grab_exi_io_base(void)
{
u32 v;
void *devp;
devp = find_node_by_compatible(NULL, "nintendo,flipper-exi");
if (devp == NULL)
goto err_out;
if (getprop(devp, "virtual-reg", &v, sizeof(v)) != sizeof(v))
goto err_out;
return (void *)v;
err_out:
return NULL;
}
void *ug_probe(void)
{
void *exi_io_base;
int i;
exi_io_base = ug_grab_exi_io_base();
if (!exi_io_base)
return NULL;
/* look for a usbgecko on memcard slots A and B */
for (i = 0; i < 2; i++) {
ug_io_base = exi_io_base + 0x14 * i;
if (ug_is_adapter_present())
break;
}
if (i == 2)
ug_io_base = NULL;
return ug_io_base;
}
| gpl-2.0 |
Myself5/android_kernel_sony_msm8974 | arch/powerpc/boot/ugecon.c | 13268 | 2911 | /*
* arch/powerpc/boot/ugecon.c
*
* USB Gecko bootwrapper console.
* Copyright (C) 2008-2009 The GameCube Linux Team
* Copyright (C) 2008,2009 Albert Herranz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
*/
#include <stddef.h>
#include "stdio.h"
#include "types.h"
#include "io.h"
#include "ops.h"
#define EXI_CLK_32MHZ 5
#define EXI_CSR 0x00
#define EXI_CSR_CLKMASK (0x7<<4)
#define EXI_CSR_CLK_32MHZ (EXI_CLK_32MHZ<<4)
#define EXI_CSR_CSMASK (0x7<<7)
#define EXI_CSR_CS_0 (0x1<<7) /* Chip Select 001 */
#define EXI_CR 0x0c
#define EXI_CR_TSTART (1<<0)
#define EXI_CR_WRITE (1<<2)
#define EXI_CR_READ_WRITE (2<<2)
#define EXI_CR_TLEN(len) (((len)-1)<<4)
#define EXI_DATA 0x10
/* virtual address base for input/output, retrieved from device tree */
static void *ug_io_base;
static u32 ug_io_transaction(u32 in)
{
u32 *csr_reg = ug_io_base + EXI_CSR;
u32 *data_reg = ug_io_base + EXI_DATA;
u32 *cr_reg = ug_io_base + EXI_CR;
u32 csr, data, cr;
/* select */
csr = EXI_CSR_CLK_32MHZ | EXI_CSR_CS_0;
out_be32(csr_reg, csr);
/* read/write */
data = in;
out_be32(data_reg, data);
cr = EXI_CR_TLEN(2) | EXI_CR_READ_WRITE | EXI_CR_TSTART;
out_be32(cr_reg, cr);
while (in_be32(cr_reg) & EXI_CR_TSTART)
barrier();
/* deselect */
out_be32(csr_reg, 0);
data = in_be32(data_reg);
return data;
}
static int ug_is_txfifo_ready(void)
{
return ug_io_transaction(0xc0000000) & 0x04000000;
}
static void ug_raw_putc(char ch)
{
ug_io_transaction(0xb0000000 | (ch << 20));
}
static void ug_putc(char ch)
{
int count = 16;
if (!ug_io_base)
return;
while (!ug_is_txfifo_ready() && count--)
barrier();
if (count >= 0)
ug_raw_putc(ch);
}
void ug_console_write(const char *buf, int len)
{
char *b = (char *)buf;
while (len--) {
if (*b == '\n')
ug_putc('\r');
ug_putc(*b++);
}
}
static int ug_is_adapter_present(void)
{
if (!ug_io_base)
return 0;
return ug_io_transaction(0x90000000) == 0x04700000;
}
static void *ug_grab_exi_io_base(void)
{
u32 v;
void *devp;
devp = find_node_by_compatible(NULL, "nintendo,flipper-exi");
if (devp == NULL)
goto err_out;
if (getprop(devp, "virtual-reg", &v, sizeof(v)) != sizeof(v))
goto err_out;
return (void *)v;
err_out:
return NULL;
}
void *ug_probe(void)
{
void *exi_io_base;
int i;
exi_io_base = ug_grab_exi_io_base();
if (!exi_io_base)
return NULL;
/* look for a usbgecko on memcard slots A and B */
for (i = 0; i < 2; i++) {
ug_io_base = exi_io_base + 0x14 * i;
if (ug_is_adapter_present())
break;
}
if (i == 2)
ug_io_base = NULL;
return ug_io_base;
}
| gpl-2.0 |
alexandrinno/android_device_sony_D2403 | arch/powerpc/boot/simple_alloc.c | 14548 | 3617 | /*
* Implement primitive realloc(3) functionality.
*
* Author: Mark A. Greer <mgreer@mvista.com>
*
* 2006 (c) MontaVista, Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <stddef.h>
#include "types.h"
#include "page.h"
#include "string.h"
#include "ops.h"
#define ENTRY_BEEN_USED 0x01
#define ENTRY_IN_USE 0x02
static struct alloc_info {
unsigned long flags;
unsigned long base;
unsigned long size;
} *alloc_tbl;
static unsigned long tbl_entries;
static unsigned long alloc_min;
static unsigned long next_base;
static unsigned long space_left;
/*
* First time an entry is used, its base and size are set.
* An entry can be freed and re-malloc'd but its base & size don't change.
* Should be smart enough for needs of bootwrapper.
*/
static void *simple_malloc(unsigned long size)
{
unsigned long i;
struct alloc_info *p = alloc_tbl;
if (size == 0)
goto err_out;
size = _ALIGN_UP(size, alloc_min);
for (i=0; i<tbl_entries; i++, p++)
if (!(p->flags & ENTRY_BEEN_USED)) { /* never been used */
if (size <= space_left) {
p->base = next_base;
p->size = size;
p->flags = ENTRY_BEEN_USED | ENTRY_IN_USE;
next_base += size;
space_left -= size;
return (void *)p->base;
}
goto err_out; /* not enough space left */
}
/* reuse an entry keeping same base & size */
else if (!(p->flags & ENTRY_IN_USE) && (size <= p->size)) {
p->flags |= ENTRY_IN_USE;
return (void *)p->base;
}
err_out:
return NULL;
}
static struct alloc_info *simple_find_entry(void *ptr)
{
unsigned long i;
struct alloc_info *p = alloc_tbl;
for (i=0; i<tbl_entries; i++,p++) {
if (!(p->flags & ENTRY_BEEN_USED))
break;
if ((p->flags & ENTRY_IN_USE) &&
(p->base == (unsigned long)ptr))
return p;
}
return NULL;
}
static void simple_free(void *ptr)
{
struct alloc_info *p = simple_find_entry(ptr);
if (p != NULL)
p->flags &= ~ENTRY_IN_USE;
}
/*
* Change size of area pointed to by 'ptr' to 'size'.
* If 'ptr' is NULL, then its a malloc(). If 'size' is 0, then its a free().
* 'ptr' must be NULL or a pointer to a non-freed area previously returned by
* simple_realloc() or simple_malloc().
*/
static void *simple_realloc(void *ptr, unsigned long size)
{
struct alloc_info *p;
void *new;
if (size == 0) {
simple_free(ptr);
return NULL;
}
if (ptr == NULL)
return simple_malloc(size);
p = simple_find_entry(ptr);
if (p == NULL) /* ptr not from simple_malloc/simple_realloc */
return NULL;
if (size <= p->size) /* fits in current block */
return ptr;
new = simple_malloc(size);
if (new) {
memcpy(new, ptr, p->size);
simple_free(ptr);
}
/* on failure, keep the old block and return NULL, like realloc(3) */
return new;
}
/*
* Returns addr of first byte after heap so caller can see if it took
* too much space. If so, change args & try again.
*/
void *simple_alloc_init(char *base, unsigned long heap_size,
unsigned long granularity, unsigned long max_allocs)
{
unsigned long heap_base, tbl_size;
heap_size = _ALIGN_UP(heap_size, granularity);
alloc_min = granularity;
tbl_entries = max_allocs;
tbl_size = tbl_entries * sizeof(struct alloc_info);
alloc_tbl = (struct alloc_info *)_ALIGN_UP((unsigned long)base, 8);
memset(alloc_tbl, 0, tbl_size);
heap_base = _ALIGN_UP((unsigned long)alloc_tbl + tbl_size, alloc_min);
next_base = heap_base;
space_left = heap_size;
platform_ops.malloc = simple_malloc;
platform_ops.free = simple_free;
platform_ops.realloc = simple_realloc;
return (void *)(heap_base + heap_size);
}
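/*
 * Usage sketch (added for illustration; the names are assumptions, not from
 * the original file): a platform init routine would typically carve the heap
 * out of memory just past the wrapper image and check the returned end
 * address against the space available, e.g.
 *
 * void *heap_end = simple_alloc_init(_end, 4 << 20, 32, 64);
 * // malloc/free/realloc now go through platform_ops;
 * // heap_end points just past the allocation table plus heap.
 */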
| gpl-2.0 |
krieger-od/linux | kernel/power/suspend.c | 213 | 13483 | /*
* kernel/power/suspend.c - Suspend to RAM and standby functionality.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
*
* This file is released under the GPLv2.
*/
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <trace/events/power.h>
#include <linux/compiler.h>
#include <linux/moduleparam.h>
#include "power.h"
const char *pm_labels[] = { "mem", "standby", "freeze", NULL };
const char *pm_states[PM_SUSPEND_MAX];
static const struct platform_suspend_ops *suspend_ops;
static const struct platform_freeze_ops *freeze_ops;
static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
enum freeze_state __read_mostly suspend_freeze_state;
static DEFINE_SPINLOCK(suspend_freeze_lock);
void freeze_set_ops(const struct platform_freeze_ops *ops)
{
lock_system_sleep();
freeze_ops = ops;
unlock_system_sleep();
}
static void freeze_begin(void)
{
suspend_freeze_state = FREEZE_STATE_NONE;
}
static void freeze_enter(void)
{
spin_lock_irq(&suspend_freeze_lock);
if (pm_wakeup_pending())
goto out;
suspend_freeze_state = FREEZE_STATE_ENTER;
spin_unlock_irq(&suspend_freeze_lock);
get_online_cpus();
cpuidle_resume();
/* Push all the CPUs into the idle loop. */
wake_up_all_idle_cpus();
pr_debug("PM: suspend-to-idle\n");
/* Make the current CPU wait so it can enter the idle loop too. */
wait_event(suspend_freeze_wait_head,
suspend_freeze_state == FREEZE_STATE_WAKE);
pr_debug("PM: resume from suspend-to-idle\n");
cpuidle_pause();
put_online_cpus();
spin_lock_irq(&suspend_freeze_lock);
out:
suspend_freeze_state = FREEZE_STATE_NONE;
spin_unlock_irq(&suspend_freeze_lock);
}
void freeze_wake(void)
{
unsigned long flags;
spin_lock_irqsave(&suspend_freeze_lock, flags);
if (suspend_freeze_state > FREEZE_STATE_NONE) {
suspend_freeze_state = FREEZE_STATE_WAKE;
wake_up(&suspend_freeze_wait_head);
}
spin_unlock_irqrestore(&suspend_freeze_lock, flags);
}
EXPORT_SYMBOL_GPL(freeze_wake);
static bool valid_state(suspend_state_t state)
{
/*
* PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level
* support and need to be valid to the low level
* implementation; no valid callback implies that none are valid.
*/
return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}
/*
* If this is set, the "mem" label always corresponds to the deepest sleep state
* available, the "standby" label corresponds to the second deepest sleep state
* available (if any), and the "freeze" label corresponds to the remaining
* available sleep state (if there is one).
*/
static bool relative_states;
static int __init sleep_states_setup(char *str)
{
relative_states = !strncmp(str, "1", 1);
pm_states[PM_SUSPEND_FREEZE] = pm_labels[relative_states ? 0 : 2];
return 1;
}
__setup("relative_sleep_states=", sleep_states_setup);
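/*
 * Example (added for illustration): with "relative_sleep_states=1" on a
 * platform that only implements mem, suspend_set_ops() below leaves "mem"
 * naming that deepest state and shifts "standby" onto suspend-to-idle,
 * while the "freeze" label goes unused; with the default (0) the labels
 * keep their absolute meaning and unsupported states are simply hidden.
 */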
/**
* suspend_set_ops - Set the global suspend method table.
* @ops: Suspend operations to use.
*/
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
suspend_state_t i;
int j = 0;
lock_system_sleep();
suspend_ops = ops;
for (i = PM_SUSPEND_MEM; i >= PM_SUSPEND_STANDBY; i--)
if (valid_state(i)) {
pm_states[i] = pm_labels[j++];
} else if (!relative_states) {
pm_states[i] = NULL;
j++;
}
pm_states[PM_SUSPEND_FREEZE] = pm_labels[j];
unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);
/**
* suspend_valid_only_mem - Generic memory-only valid callback.
*
* Platform drivers that implement mem suspend only and only need to check for
* that in their .valid() callback can use this instead of rolling their own
* .valid() callback.
*/
int suspend_valid_only_mem(suspend_state_t state)
{
return state == PM_SUSPEND_MEM;
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
static bool sleep_state_supported(suspend_state_t state)
{
return state == PM_SUSPEND_FREEZE || (suspend_ops && suspend_ops->enter);
}
static int platform_suspend_prepare(suspend_state_t state)
{
return state != PM_SUSPEND_FREEZE && suspend_ops->prepare ?
suspend_ops->prepare() : 0;
}
static int platform_suspend_prepare_late(suspend_state_t state)
{
return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
freeze_ops->prepare() : 0;
}
static int platform_suspend_prepare_noirq(suspend_state_t state)
{
return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ?
suspend_ops->prepare_late() : 0;
}
static void platform_resume_noirq(suspend_state_t state)
{
if (state != PM_SUSPEND_FREEZE && suspend_ops->wake)
suspend_ops->wake();
}
static void platform_resume_early(suspend_state_t state)
{
if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
freeze_ops->restore();
}
static void platform_resume_finish(suspend_state_t state)
{
if (state != PM_SUSPEND_FREEZE && suspend_ops->finish)
suspend_ops->finish();
}
static int platform_suspend_begin(suspend_state_t state)
{
if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin)
return freeze_ops->begin();
else if (suspend_ops->begin)
return suspend_ops->begin(state);
else
return 0;
}
static void platform_resume_end(suspend_state_t state)
{
if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
freeze_ops->end();
else if (suspend_ops->end)
suspend_ops->end();
}
static void platform_recover(suspend_state_t state)
{
if (state != PM_SUSPEND_FREEZE && suspend_ops->recover)
suspend_ops->recover();
}
static bool platform_suspend_again(suspend_state_t state)
{
return state != PM_SUSPEND_FREEZE && suspend_ops->suspend_again ?
suspend_ops->suspend_again() : false;
}
#ifdef CONFIG_PM_DEBUG
static unsigned int pm_test_delay = 5;
module_param(pm_test_delay, uint, 0644);
MODULE_PARM_DESC(pm_test_delay,
"Number of seconds to wait before resuming from suspend test");
#endif
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
if (pm_test_level == level) {
printk(KERN_INFO "suspend debug: Waiting for %d second(s).\n",
pm_test_delay);
mdelay(pm_test_delay * 1000);
return 1;
}
#endif /* !CONFIG_PM_DEBUG */
return 0;
}
/**
* suspend_prepare - Prepare for entering system sleep state.
*
* Common code run for every system sleep state that can be entered (except for
* hibernation). Run suspend notifiers, allocate the "suspend" console and
* freeze processes.
*/
static int suspend_prepare(suspend_state_t state)
{
int error;
if (!sleep_state_supported(state))
return -EPERM;
pm_prepare_console();
error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
if (error)
goto Finish;
trace_suspend_resume(TPS("freeze_processes"), 0, true);
error = suspend_freeze_processes();
trace_suspend_resume(TPS("freeze_processes"), 0, false);
if (!error)
return 0;
suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
return error;
}
/* default implementation */
void __weak arch_suspend_disable_irqs(void)
{
local_irq_disable();
}
/* default implementation */
void __weak arch_suspend_enable_irqs(void)
{
local_irq_enable();
}
/**
* suspend_enter - Make the system enter the given sleep state.
* @state: System sleep state to enter.
* @wakeup: Returns information that the sleep state should not be re-entered.
*
* This function should be called after devices have been suspended.
*/
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
int error;
error = platform_suspend_prepare(state);
if (error)
goto Platform_finish;
error = dpm_suspend_late(PMSG_SUSPEND);
if (error) {
printk(KERN_ERR "PM: late suspend of devices failed\n");
goto Platform_finish;
}
error = platform_suspend_prepare_late(state);
if (error)
goto Devices_early_resume;
error = dpm_suspend_noirq(PMSG_SUSPEND);
if (error) {
printk(KERN_ERR "PM: noirq suspend of devices failed\n");
goto Platform_early_resume;
}
error = platform_suspend_prepare_noirq(state);
if (error)
goto Platform_wake;
if (suspend_test(TEST_PLATFORM))
goto Platform_wake;
/*
* PM_SUSPEND_FREEZE equals
* frozen processes + suspended devices + idle processors.
* Thus we should invoke freeze_enter() soon after
* all the devices are suspended.
*/
if (state == PM_SUSPEND_FREEZE) {
trace_suspend_resume(TPS("machine_suspend"), state, true);
freeze_enter();
trace_suspend_resume(TPS("machine_suspend"), state, false);
goto Platform_wake;
}
error = disable_nonboot_cpus();
if (error || suspend_test(TEST_CPUS))
goto Enable_cpus;
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
error = syscore_suspend();
if (!error) {
*wakeup = pm_wakeup_pending();
if (!(suspend_test(TEST_CORE) || *wakeup)) {
trace_suspend_resume(TPS("machine_suspend"),
state, true);
error = suspend_ops->enter(state);
trace_suspend_resume(TPS("machine_suspend"),
state, false);
events_check_enabled = false;
} else if (*wakeup) {
error = -EBUSY;
}
syscore_resume();
}
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
Enable_cpus:
enable_nonboot_cpus();
Platform_wake:
platform_resume_noirq(state);
dpm_resume_noirq(PMSG_RESUME);
Platform_early_resume:
platform_resume_early(state);
Devices_early_resume:
dpm_resume_early(PMSG_RESUME);
Platform_finish:
platform_resume_finish(state);
return error;
}
/**
* suspend_devices_and_enter - Suspend devices and enter system sleep state.
* @state: System sleep state to enter.
*/
int suspend_devices_and_enter(suspend_state_t state)
{
int error;
bool wakeup = false;
if (!sleep_state_supported(state))
return -ENOSYS;
error = platform_suspend_begin(state);
if (error)
goto Close;
suspend_console();
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
pr_err("PM: Some devices failed to suspend, or early wake event detected\n");
goto Recover_platform;
}
suspend_test_finish("suspend devices");
if (suspend_test(TEST_DEVICES))
goto Recover_platform;
do {
error = suspend_enter(state, &wakeup);
} while (!error && !wakeup && platform_suspend_again(state));
Resume_devices:
suspend_test_start();
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
trace_suspend_resume(TPS("resume_console"), state, true);
resume_console();
trace_suspend_resume(TPS("resume_console"), state, false);
Close:
platform_resume_end(state);
return error;
Recover_platform:
platform_recover(state);
goto Resume_devices;
}
/**
* suspend_finish - Clean up before finishing the suspend sequence.
*
* Call platform code to clean up, restart processes, and free the console that
* we've allocated. This routine is not called for hibernation.
*/
static void suspend_finish(void)
{
suspend_thaw_processes();
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
}
/**
* enter_state - Do common work needed to enter system sleep state.
* @state: System sleep state to enter.
*
* Make sure that no one else is trying to put the system into a sleep state.
* Fail if that's not the case. Otherwise, prepare for system suspend, make the
* system enter the given sleep state and clean up after wakeup.
*/
static int enter_state(suspend_state_t state)
{
int error;
trace_suspend_resume(TPS("suspend_enter"), state, true);
if (state == PM_SUSPEND_FREEZE) {
#ifdef CONFIG_PM_DEBUG
if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
pr_warning("PM: Unsupported test mode for suspend to idle,"
"please choose none/freezer/devices/platform.\n");
return -EAGAIN;
}
#endif
} else if (!valid_state(state)) {
return -EINVAL;
}
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
if (state == PM_SUSPEND_FREEZE)
freeze_begin();
#ifndef CONFIG_SUSPEND_SKIP_SYNC
trace_suspend_resume(TPS("sync_filesystems"), 0, true);
printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
printk("done.\n");
trace_suspend_resume(TPS("sync_filesystems"), 0, false);
#endif
pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]);
error = suspend_prepare(state);
if (error)
goto Unlock;
if (suspend_test(TEST_FREEZER))
goto Finish;
trace_suspend_resume(TPS("suspend_enter"), state, false);
pr_debug("PM: Suspending system (%s)\n", pm_states[state]);
pm_restrict_gfp_mask();
error = suspend_devices_and_enter(state);
pm_restore_gfp_mask();
Finish:
pr_debug("PM: Finishing wakeup.\n");
suspend_finish();
Unlock:
mutex_unlock(&pm_mutex);
return error;
}
/**
* pm_suspend - Externally visible function for suspending the system.
* @state: System sleep state to enter.
*
* Check if the value of @state represents one of the supported states,
* execute enter_state() and update system suspend statistics.
*/
int pm_suspend(suspend_state_t state)
{
int error;
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
return -EINVAL;
error = enter_state(state);
if (error) {
suspend_stats.fail++;
dpm_save_failed_errno(error);
} else {
suspend_stats.success++;
}
return error;
}
EXPORT_SYMBOL(pm_suspend);
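/*
 * Illustrative call path (added, not part of the original file): writing
 * "mem" to /sys/power/state ends up here as pm_suspend(PM_SUSPEND_MEM),
 * which funnels through enter_state() -> suspend_prepare() ->
 * suspend_devices_and_enter() and finally the platform's
 * suspend_ops->enter() seen above.
 */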
| gpl-2.0 |
TheGreatSega/Rush-Kernel | arch/powerpc/platforms/powermac/pci.c | 469 | 38028 | /*
* Support for PCI bridges found on Power Macintoshes.
*
* Copyright (C) 2003-2005 Benjamin Herrenschmidt (benh@kernel.crashing.org)
* Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/grackle.h>
#include <asm/ppc-pci.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
/* XXX Could be per-controller, but I don't think we risk anything by
* assuming we won't have both UniNorth and Bandit */
static int has_uninorth;
#ifdef CONFIG_PPC64
static struct pci_controller *u3_agp;
#else
static int has_second_ohare;
#endif /* CONFIG_PPC64 */
extern int pcibios_assign_bus_offset;
struct device_node *k2_skiplist[2];
/*
* Magic constants for enabling cache coherency in the bandit/PSX bridge.
*/
#define BANDIT_DEVID_2 8
#define BANDIT_REVID 3
#define BANDIT_DEVNUM 11
#define BANDIT_MAGIC 0x50
#define BANDIT_COHERENT 0x40
static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
{
for (; node != 0;node = node->sibling) {
const int * bus_range;
const unsigned int *class_code;
int len;
/* For PCI<->PCI bridges or CardBus bridges, we go down */
class_code = of_get_property(node, "class-code", NULL);
if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
bus_range = of_get_property(node, "bus-range", &len);
if (bus_range != NULL && len > 2 * sizeof(int)) {
if (bus_range[1] > higher)
higher = bus_range[1];
}
higher = fixup_one_level_bus_range(node->child, higher);
}
return higher;
}
/* This routine fixes the "bus-range" property of all bridges in the
* system since they tend to have their "last" member wrong on macs
*
* Note that the bus numbers manipulated here are OF bus numbers, they
* are not Linux bus numbers.
*/
static void __init fixup_bus_range(struct device_node *bridge)
{
int *bus_range, len;
struct property *prop;
/* Lookup the "bus-range" property for the hose */
prop = of_find_property(bridge, "bus-range", &len);
if (prop == NULL || prop->length < 2 * sizeof(int))
return;
bus_range = prop->value;
bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
}
/*
* Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
*
* The "Bandit" version is present in all early PCI PowerMacs,
* and up to the first ones using Grackle. Some machines may
* have 2 bandit controllers (2 PCI busses).
*
* "Chaos" is used in some "Bandit"-type machines as a bridge
* for the separate display bus. It is accessed the same
* way as bandit, but cannot be probed for devices. It therefore
* has its own config access functions.
*
* The "UniNorth" version is present in all Core99 machines
* (iBook, G4, new IMacs, and all the recent Apple machines).
* It contains 3 controllers in one ASIC.
*
* The U3 is the bridge used on G5 machines. It contains an
* AGP bus which is dealt with the old UniNorth access routines
* and a HyperTransport bus which uses its own set of access
* functions.
*/
#define MACRISC_CFA0(devfn, off) \
((1 << (unsigned int)PCI_SLOT(dev_fn)) \
| (((unsigned int)PCI_FUNC(dev_fn)) << 8) \
| (((unsigned int)(off)) & 0xFCUL))
#define MACRISC_CFA1(bus, devfn, off) \
((((unsigned int)(bus)) << 16) \
|(((unsigned int)(devfn)) << 8) \
|(((unsigned int)(off)) & 0xFCUL) \
|1UL)
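/*
 * Worked example (added for illustration): a type 1 cycle to bus 1,
 * devfn 0x20 (slot 4, function 0), register 0x10 uses
 *
 * MACRISC_CFA1(1, 0x20, 0x10) == (1 << 16) | (0x20 << 8) | 0x10 | 1
 *                             == 0x00012011
 *
 * while devices on the root bus get the type 0 form, with a single
 * IDSEL-style bit per slot instead of an encoded bus number.
 */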
static volatile void __iomem *macrisc_cfg_access(struct pci_controller* hose,
u8 bus, u8 dev_fn, u8 offset)
{
unsigned int caddr;
if (bus == hose->first_busno) {
if (dev_fn < (11 << 3))
return NULL;
caddr = MACRISC_CFA0(dev_fn, offset);
} else
caddr = MACRISC_CFA1(bus, dev_fn, offset);
/* Uninorth will return garbage if we don't read back the value ! */
do {
out_le32(hose->cfg_addr, caddr);
} while (in_le32(hose->cfg_addr) != caddr);
offset &= has_uninorth ? 0x07 : 0x03;
return hose->cfg_data + offset;
}
static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x100)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
*val = in_8(addr);
break;
case 2:
*val = in_le16(addr);
break;
default:
*val = in_le32(addr);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x100)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
out_8(addr, val);
break;
case 2:
out_le16(addr, val);
break;
default:
out_le32(addr, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops macrisc_pci_ops =
{
.read = macrisc_read_config,
.write = macrisc_write_config,
};
#ifdef CONFIG_PPC32
/*
* Verify that a specific (bus, dev_fn) exists on chaos
*/
static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
{
struct device_node *np;
const u32 *vendor, *device;
if (offset >= 0x100)
return PCIBIOS_BAD_REGISTER_NUMBER;
np = pci_busdev_to_OF_node(bus, devfn);
if (np == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
vendor = of_get_property(np, "vendor-id", NULL);
device = of_get_property(np, "device-id", NULL);
if (vendor == NULL || device == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10)
&& (offset != 0x14) && (offset != 0x18) && (offset <= 0x24))
return PCIBIOS_BAD_REGISTER_NUMBER;
return PCIBIOS_SUCCESSFUL;
}
static int
chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 *val)
{
int result = chaos_validate_dev(bus, devfn, offset);
if (result == PCIBIOS_BAD_REGISTER_NUMBER)
*val = ~0U;
if (result != PCIBIOS_SUCCESSFUL)
return result;
return macrisc_read_config(bus, devfn, offset, len, val);
}
static int
chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 val)
{
int result = chaos_validate_dev(bus, devfn, offset);
if (result != PCIBIOS_SUCCESSFUL)
return result;
return macrisc_write_config(bus, devfn, offset, len, val);
}
static struct pci_ops chaos_pci_ops =
{
.read = chaos_read_config,
.write = chaos_write_config,
};
static void __init setup_chaos(struct pci_controller *hose,
struct resource *addr)
{
/* assume a `chaos' bridge */
hose->ops = &chaos_pci_ops;
hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
}
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
/*
* These versions of U3 HyperTransport config space access ops do not
* implement self-view of the HT host yet
*/
/*
* This function deals with some "special cases" devices.
*
* 0 -> No special case
* 1 -> Skip the device but act as if the access was successful
* (return 0xff's on reads, eventually, cache config space
* accesses in a later version)
* -1 -> Hide the device (unsuccessful access)
*/
static int u3_ht_skip_device(struct pci_controller *hose,
struct pci_bus *bus, unsigned int devfn)
{
struct device_node *busdn, *dn;
int i;
/* We only allow config cycles to devices that are in OF device-tree
* as we are apparently having some weird things going on with some
* revs of K2 on recent G5s, except for the host bridge itself, which
* is missing from the tree but we know we can probe.
*/
if (bus->self)
busdn = pci_device_to_OF_node(bus->self);
else if (devfn == 0)
return 0;
else
busdn = hose->dn;
for (dn = busdn->child; dn; dn = dn->sibling)
if (PCI_DN(dn) && PCI_DN(dn)->devfn == devfn)
break;
if (dn == NULL)
return -1;
/*
* When a device in K2 is powered down, we die on config
* cycle accesses. Fix that here.
*/
for (i=0; i<2; i++)
if (k2_skiplist[i] == dn)
return 1;
return 0;
}
#define U3_HT_CFA0(devfn, off) \
((((unsigned int)devfn) << 8) | offset)
#define U3_HT_CFA1(bus, devfn, off) \
(U3_HT_CFA0(devfn, off) \
+ (((unsigned int)bus) << 16) \
+ 0x01000000UL)
static void __iomem *u3_ht_cfg_access(struct pci_controller *hose, u8 bus,
u8 devfn, u8 offset, int *swap)
{
*swap = 1;
if (bus == hose->first_busno) {
if (devfn != 0)
return hose->cfg_data + U3_HT_CFA0(devfn, offset);
*swap = 0;
return ((void __iomem *)hose->cfg_addr) + (offset << 2);
} else
return hose->cfg_data + U3_HT_CFA1(bus, devfn, offset);
}
static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose;
void __iomem *addr;
int swap;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x100)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u3_ht_cfg_access(hose, bus->number, devfn, offset, &swap);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
switch (u3_ht_skip_device(hose, bus, devfn)) {
case 0:
break;
case 1:
switch (len) {
case 1:
*val = 0xff; break;
case 2:
*val = 0xffff; break;
default:
*val = 0xfffffffful; break;
}
return PCIBIOS_SUCCESSFUL;
default:
return PCIBIOS_DEVICE_NOT_FOUND;
}
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
*val = in_8(addr);
break;
case 2:
*val = swap ? in_le16(addr) : in_be16(addr);
break;
default:
*val = swap ? in_le32(addr) : in_be32(addr);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose;
void __iomem *addr;
int swap;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x100)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u3_ht_cfg_access(hose, bus->number, devfn, offset, &swap);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
switch (u3_ht_skip_device(hose, bus, devfn)) {
case 0:
break;
case 1:
return PCIBIOS_SUCCESSFUL;
default:
return PCIBIOS_DEVICE_NOT_FOUND;
}
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
out_8(addr, val);
break;
case 2:
swap ? out_le16(addr, val) : out_be16(addr, val);
break;
default:
swap ? out_le32(addr, val) : out_be32(addr, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops u3_ht_pci_ops =
{
.read = u3_ht_read_config,
.write = u3_ht_write_config,
};
#define U4_PCIE_CFA0(devfn, off) \
((1 << ((unsigned int)PCI_SLOT(dev_fn))) \
| (((unsigned int)PCI_FUNC(dev_fn)) << 8) \
| ((((unsigned int)(off)) >> 8) << 28) \
| (((unsigned int)(off)) & 0xfcU))
#define U4_PCIE_CFA1(bus, devfn, off) \
((((unsigned int)(bus)) << 16) \
|(((unsigned int)(devfn)) << 8) \
| ((((unsigned int)(off)) >> 8) << 28) \
|(((unsigned int)(off)) & 0xfcU) \
|1UL)
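/*
 * Worked example (added for illustration): the U4 PCIe variant also carries
 * the extended config space bits, so register 0x104 of devfn 0x08 on bus 2
 * encodes as
 *
 * U4_PCIE_CFA1(2, 0x08, 0x104)
 *   == (2 << 16) | (0x08 << 8) | ((0x104 >> 8) << 28) | 0x04 | 1
 *   == 0x10020805
 */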
static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose,
u8 bus, u8 dev_fn, int offset)
{
unsigned int caddr;
if (bus == hose->first_busno) {
caddr = U4_PCIE_CFA0(dev_fn, offset);
} else
caddr = U4_PCIE_CFA1(bus, dev_fn, offset);
/* Uninorth will return garbage if we don't read back the value ! */
do {
out_le32(hose->cfg_addr, caddr);
} while (in_le32(hose->cfg_addr) != caddr);
offset &= 0x03;
return hose->cfg_data + offset;
}
static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x1000)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
*val = in_8(addr);
break;
case 2:
*val = in_le16(addr);
break;
default:
*val = in_le32(addr);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x1000)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
out_8(addr, val);
break;
case 2:
out_le16(addr, val);
break;
default:
out_le32(addr, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops u4_pcie_pci_ops =
{
.read = u4_pcie_read_config,
.write = u4_pcie_write_config,
};
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
/*
* For a bandit bridge, turn on cache coherency if necessary.
* N.B. we could clean this up using the hose ops directly.
*/
static void __init init_bandit(struct pci_controller *bp)
{
unsigned int vendev, magic;
int rev;
/* read the word at offset 0 in config space for device 11 */
out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + PCI_VENDOR_ID);
udelay(2);
vendev = in_le32(bp->cfg_data);
if (vendev == (PCI_DEVICE_ID_APPLE_BANDIT << 16) +
PCI_VENDOR_ID_APPLE) {
/* read the revision id */
out_le32(bp->cfg_addr,
(1UL << BANDIT_DEVNUM) + PCI_REVISION_ID);
udelay(2);
rev = in_8(bp->cfg_data);
if (rev != BANDIT_REVID)
printk(KERN_WARNING
"Unknown revision %d for bandit\n", rev);
} else if (vendev != (BANDIT_DEVID_2 << 16) + PCI_VENDOR_ID_APPLE) {
printk(KERN_WARNING "bandit isn't? (%x)\n", vendev);
return;
}
/* read the word at offset 0x50 */
out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + BANDIT_MAGIC);
udelay(2);
magic = in_le32(bp->cfg_data);
if ((magic & BANDIT_COHERENT) != 0)
return;
magic |= BANDIT_COHERENT;
udelay(2);
out_le32(bp->cfg_data, magic);
printk(KERN_INFO "Cache coherency enabled for bandit/PSX\n");
}
/*
* Tweak the PCI-PCI bridge chip on the blue & white G3s.
*/
static void __init init_p2pbridge(void)
{
struct device_node *p2pbridge;
struct pci_controller* hose;
u8 bus, devfn;
u16 val;
/* XXX it would be better here to identify the specific
PCI-PCI bridge chip we have. */
p2pbridge = of_find_node_by_name(NULL, "pci-bridge");
if (p2pbridge == NULL
|| p2pbridge->parent == NULL
|| strcmp(p2pbridge->parent->name, "pci") != 0)
goto done;
if (pci_device_from_OF_node(p2pbridge, &bus, &devfn) < 0) {
DBG("Can't find PCI infos for PCI<->PCI bridge\n");
goto done;
}
/* Warning: At this point, we have not yet renumbered all busses.
* So we must use OF walking to find the hose
*/
hose = pci_find_hose_for_OF_device(p2pbridge);
if (!hose) {
DBG("Can't find hose for PCI<->PCI bridge\n");
goto done;
}
if (early_read_config_word(hose, bus, devfn,
PCI_BRIDGE_CONTROL, &val) < 0) {
printk(KERN_ERR "init_p2pbridge: couldn't read bridge"
" control\n");
goto done;
}
val &= ~PCI_BRIDGE_CTL_MASTER_ABORT;
early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val);
done:
of_node_put(p2pbridge);
}
static void __init init_second_ohare(void)
{
struct device_node *np = of_find_node_by_name(NULL, "pci106b,7");
unsigned char bus, devfn;
unsigned short cmd;
if (np == NULL)
return;
/* This must run before we initialize the PICs since the second
* ohare hosts a PIC that will be accessed there.
*/
if (pci_device_from_OF_node(np, &bus, &devfn) == 0) {
struct pci_controller* hose =
pci_find_hose_for_OF_device(np);
if (!hose) {
printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
of_node_put(np);
return;
}
early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
cmd &= ~PCI_COMMAND_IO;
early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
}
has_second_ohare = 1;
of_node_put(np);
}
/*
* Some Apple desktop machines have a NEC PD720100A USB2 controller
* on the motherboard. Open Firmware, on these, will disable the
* EHCI part of it so it behaves like a pair of OHCI's. This fixup
* code re-enables it ;)
*/
static void __init fixup_nec_usb2(void)
{
struct device_node *nec;
for (nec = NULL; (nec = of_find_node_by_name(nec, "usb")) != NULL;) {
struct pci_controller *hose;
u32 data;
const u32 *prop;
u8 bus, devfn;
prop = of_get_property(nec, "vendor-id", NULL);
if (prop == NULL)
continue;
if (0x1033 != *prop)
continue;
prop = of_get_property(nec, "device-id", NULL);
if (prop == NULL)
continue;
if (0x0035 != *prop)
continue;
prop = of_get_property(nec, "reg", NULL);
if (prop == NULL)
continue;
devfn = (prop[0] >> 8) & 0xff;
bus = (prop[0] >> 16) & 0xff;
if (PCI_FUNC(devfn) != 0)
continue;
hose = pci_find_hose_for_OF_device(nec);
if (!hose)
continue;
early_read_config_dword(hose, bus, devfn, 0xe4, &data);
if (data & 1UL) {
printk("Found NEC PD720100A USB2 chip with disabled"
" EHCI, fixing up...\n");
data &= ~1UL;
early_write_config_dword(hose, bus, devfn, 0xe4, data);
}
}
}
static void __init setup_bandit(struct pci_controller *hose,
struct resource *addr)
{
hose->ops = &macrisc_pci_ops;
hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
init_bandit(hose);
}
static int __init setup_uninorth(struct pci_controller *hose,
struct resource *addr)
{
ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
has_uninorth = 1;
hose->ops = &macrisc_pci_ops;
hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
/* We "know" that the bridge at f2000000 has the PCI slots. */
return addr->start == 0xf2000000;
}
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
static void __init setup_u3_agp(struct pci_controller* hose)
{
/* On G5, we move AGP up to high bus number so we don't need
* to reassign bus numbers for HT. If we ever have P2P bridges
* on AGP, we'll have to move pci_assign_all_busses to the
* pci_controller structure so we enable it for AGP and not for
* HT children.
* We hard code the address because of the different size of
* the reg address cell, we shall fix that by killing struct
* reg_property and using some accessor functions instead
*/
hose->first_busno = 0xf0;
hose->last_busno = 0xff;
has_uninorth = 1;
hose->ops = &macrisc_pci_ops;
hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
u3_agp = hose;
}
static void __init setup_u4_pcie(struct pci_controller* hose)
{
/* We currently only implement the "non-atomic" config space, to
* be optimised later.
*/
hose->ops = &u4_pcie_pci_ops;
hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
/* The bus contains a bridge from root -> device, we need to
* make it visible on bus 0 so that we pick the right type
* of config cycles. If we didn't, we would have to force all
* config cycles to be type 1. So we override the "bus-range"
* property here
*/
hose->first_busno = 0x00;
hose->last_busno = 0xff;
}
static void __init parse_region_decode(struct pci_controller *hose,
u32 decode)
{
unsigned long base, end, next = -1;
int i, cur = -1;
/* Iterate through all bits. We ignore the last bit as this region is
* reserved for the ROM among other niceties
*/
for (i = 0; i < 31; i++) {
if ((decode & (0x80000000 >> i)) == 0)
continue;
if (i < 16) {
base = 0xf0000000 | (((u32)i) << 24);
end = base + 0x00ffffff;
} else {
base = ((u32)i-16) << 28;
end = base + 0x0fffffff;
}
if (base != next) {
if (++cur >= 3) {
printk(KERN_WARNING "PCI: Too many ranges !\n");
break;
}
hose->mem_resources[cur].flags = IORESOURCE_MEM;
hose->mem_resources[cur].name = hose->dn->full_name;
hose->mem_resources[cur].start = base;
hose->mem_resources[cur].end = end;
DBG(" %d: 0x%08lx-0x%08lx\n", cur, base, end);
} else {
DBG(" : -0x%08lx\n", end);
hose->mem_resources[cur].end = end;
}
next = end + 1;
}
}
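/*
 * Illustrative example (added, not part of the original file): with the
 * mask applied in setup_u3_ht() below, two adjacent enable bits in the loop
 * above, say the ones selecting 0xfa000000 and 0xfb000000, produce
 * contiguous ranges and are merged into a single 0xfa000000-0xfbffffff
 * memory resource rather than consuming two of the three available slots.
 */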
static void __init setup_u3_ht(struct pci_controller* hose)
{
struct device_node *np = hose->dn;
struct resource cfg_res, self_res;
u32 decode;
hose->ops = &u3_ht_pci_ops;
/* Get base addresses from OF tree
*/
if (of_address_to_resource(np, 0, &cfg_res) ||
of_address_to_resource(np, 1, &self_res)) {
printk(KERN_ERR "PCI: Failed to get U3/U4 HT resources !\n");
return;
}
/* Map external cfg space access into cfg_data and self registers
* into cfg_addr
*/
hose->cfg_data = ioremap(cfg_res.start, 0x02000000);
hose->cfg_addr = ioremap(self_res.start,
self_res.end - self_res.start + 1);
/*
* /ht node doesn't expose a "ranges" property, we read the register
* that controls the decoding logic and use that for memory regions.
* The IO region is hard coded since it is fixed in HW as well.
*/
hose->io_base_phys = 0xf4000000;
hose->pci_io_size = 0x00400000;
hose->io_resource.name = np->full_name;
hose->io_resource.start = 0;
hose->io_resource.end = 0x003fffff;
hose->io_resource.flags = IORESOURCE_IO;
hose->pci_mem_offset = 0;
hose->first_busno = 0;
hose->last_busno = 0xef;
/* Note: fix offset when cfg_addr becomes a void * */
decode = in_be32(hose->cfg_addr + 0x80);
DBG("PCI: Apple HT bridge decode register: 0x%08x\n", decode);
/* NOTE: The decode register setup is a bit weird... region
* 0xf8000000 for example is marked as enabled in there while it's
& actually the memory controller registers.
* That means that we are incorrectly attributing it to HT.
*
* In a similar vein, region 0xf4000000 is actually the HT IO space but
* also marked as enabled in here and 0xf9000000 is used by some other
* internal bits of the northbridge.
*
* Unfortunately, we can't just mask out those bits as we would end
* up with more regions than we can cope with (Linux can only cope with
* 3 memory regions for a PHB at this stage).
*
* So for now, we just do a little hack. We happen to -know- that
* Apple firmware doesn't assign things below 0xfa000000 for that
* bridge anyway so we mask out all bits we don't want.
*/
decode &= 0x003fffff;
/* Now parse the resulting bits and build resources */
parse_region_decode(hose, decode);
}
#endif /* CONFIG_PPC64 */
/*
* We assume that if we have a G3 powermac, we have one bridge called
* "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
* if we have one or more bandit or chaos bridges, we don't have a MPC106.
*/
static int __init pmac_add_bridge(struct device_node *dev)
{
int len;
struct pci_controller *hose;
struct resource rsrc;
char *disp_name;
const int *bus_range;
int primary = 1, has_address = 0;
DBG("Adding PCI host bridge %s\n", dev->full_name);
/* Fetch host bridge registers address */
has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
/* Get bus range if any */
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get bus-range for %s, assume"
" bus 0\n", dev->full_name);
}
hose = pcibios_alloc_controller(dev);
if (!hose)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
disp_name = NULL;
/* 64 bits only bridges */
#ifdef CONFIG_PPC64
if (of_device_is_compatible(dev, "u3-agp")) {
setup_u3_agp(hose);
disp_name = "U3-AGP";
primary = 0;
} else if (of_device_is_compatible(dev, "u3-ht")) {
setup_u3_ht(hose);
disp_name = "U3-HT";
primary = 1;
} else if (of_device_is_compatible(dev, "u4-pcie")) {
setup_u4_pcie(hose);
disp_name = "U4-PCIE";
primary = 0;
}
printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number:"
" %d->%d\n", disp_name, hose->first_busno, hose->last_busno);
#endif /* CONFIG_PPC64 */
/* 32 bits only bridges */
#ifdef CONFIG_PPC32
if (of_device_is_compatible(dev, "uni-north")) {
primary = setup_uninorth(hose, &rsrc);
disp_name = "UniNorth";
} else if (strcmp(dev->name, "pci") == 0) {
/* XXX assume this is a mpc106 (grackle) */
setup_grackle(hose);
disp_name = "Grackle (MPC106)";
} else if (strcmp(dev->name, "bandit") == 0) {
setup_bandit(hose, &rsrc);
disp_name = "Bandit";
} else if (strcmp(dev->name, "chaos") == 0) {
setup_chaos(hose, &rsrc);
disp_name = "Chaos";
primary = 0;
}
printk(KERN_INFO "Found %s PCI host bridge at 0x%016llx. "
"Firmware bus number: %d->%d\n",
disp_name, (unsigned long long)rsrc.start, hose->first_busno,
hose->last_busno);
#endif /* CONFIG_PPC32 */
DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
hose, hose->cfg_addr, hose->cfg_data);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
pci_process_bridge_OF_ranges(hose, dev, primary);
/* Fixup "bus-range" OF property */
fixup_bus_range(dev);
return 0;
}
void __devinit pmac_pci_irq_fixup(struct pci_dev *dev)
{
#ifdef CONFIG_PPC32
/* Fixup interrupt for the modem/ethernet combo controller.
* on machines with a second ohare chip.
* The number in the device tree (27) is bogus (correct for
* the ethernet-only board but not the combo ethernet/modem
* board). The real interrupt is 28 on the second controller
* -> 28+32 = 60.
*/
if (has_second_ohare &&
dev->vendor == PCI_VENDOR_ID_DEC &&
dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) {
dev->irq = irq_create_mapping(NULL, 60);
set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
}
#endif /* CONFIG_PPC32 */
}
void __init pmac_pci_init(void)
{
struct device_node *np, *root;
struct device_node *ht = NULL;
ppc_pci_set_flags(PPC_PCI_CAN_SKIP_ISA_ALIGN);
root = of_find_node_by_path("/");
if (root == NULL) {
printk(KERN_CRIT "pmac_pci_init: can't find root "
"of device tree\n");
return;
}
for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
if (np->name == NULL)
continue;
if (strcmp(np->name, "bandit") == 0
|| strcmp(np->name, "chaos") == 0
|| strcmp(np->name, "pci") == 0) {
if (pmac_add_bridge(np) == 0)
of_node_get(np);
}
if (strcmp(np->name, "ht") == 0) {
of_node_get(np);
ht = np;
}
}
of_node_put(root);
#ifdef CONFIG_PPC64
/* Probe HT last as it relies on the agp resources to be already
* setup
*/
if (ht && pmac_add_bridge(ht) != 0)
of_node_put(ht);
/* Setup the linkage between OF nodes and PHBs */
pci_devs_phb_init();
/* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
* assume there is no P2P bridge on the AGP bus, which should be a
* safe assumption for now. We should do something better in the
* future though
*/
if (u3_agp) {
struct device_node *np = u3_agp->dn;
PCI_DN(np)->busno = 0xf0;
for (np = np->child; np; np = np->sibling)
PCI_DN(np)->busno = 0xf0;
}
/* pmac_check_ht_link(); */
/* We can allocate missing resources if any */
pci_probe_only = 0;
#else /* CONFIG_PPC64 */
init_p2pbridge();
init_second_ohare();
fixup_nec_usb2();
/* We are still having some issues with the Xserve G4; enabling
* an offset between bus numbers and domains when we reassign
* all busses should help for now
*/
if (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
pcibios_assign_bus_offset = 0x10;
#endif
}
#ifdef CONFIG_PPC32
int pmac_pci_enable_device_hook(struct pci_dev *dev)
{
struct device_node* node;
int updatecfg = 0;
int uninorth_child;
node = pci_device_to_OF_node(dev);
/* We don't want to enable USB controllers absent from the OF tree
* (iBook second controller)
*/
if (dev->vendor == PCI_VENDOR_ID_APPLE
&& dev->class == PCI_CLASS_SERIAL_USB_OHCI
&& !node) {
printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n",
pci_name(dev));
return -EINVAL;
}
if (!node)
return 0;
uninorth_child = node->parent &&
of_device_is_compatible(node->parent, "uni-north");
/* Firewire & GMAC were disabled after PCI probe, the driver is
* claiming them, we must re-enable them now.
*/
if (uninorth_child && !strcmp(node->name, "firewire") &&
(of_device_is_compatible(node, "pci106b,18") ||
of_device_is_compatible(node, "pci106b,30") ||
of_device_is_compatible(node, "pci11c1,5811"))) {
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, node, 0, 1);
pmac_call_feature(PMAC_FTR_1394_ENABLE, node, 0, 1);
updatecfg = 1;
}
if (uninorth_child && !strcmp(node->name, "ethernet") &&
of_device_is_compatible(node, "gmac")) {
pmac_call_feature(PMAC_FTR_GMAC_ENABLE, node, 0, 1);
updatecfg = 1;
}
/*
* Fixup various header fields on 32 bits. We don't do that on
* 64 bits as some of these have strange values behind the HT
* bridge and we must not, for example, enable MWI or set the
* cache line size on them.
*/
if (updatecfg) {
u16 cmd;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
| PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 16);
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
L1_CACHE_BYTES >> 2);
}
return 0;
}
void __devinit pmac_pci_fixup_ohci(struct pci_dev *dev)
{
struct device_node *node = pci_device_to_OF_node(dev);
/* We don't want to assign resources to USB controllers
* absent from the OF tree (iBook second controller)
*/
if (dev->class == PCI_CLASS_SERIAL_USB_OHCI && !node)
dev->resource[0].flags = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_ANY_ID, pmac_pci_fixup_ohci);
/* We power down some devices after they have been probed. They'll
* be powered back on later on
*/
void __init pmac_pcibios_after_init(void)
{
struct device_node* nd;
for_each_node_by_name(nd, "firewire") {
if (nd->parent && (of_device_is_compatible(nd, "pci106b,18") ||
of_device_is_compatible(nd, "pci106b,30") ||
of_device_is_compatible(nd, "pci11c1,5811"))
&& of_device_is_compatible(nd->parent, "uni-north")) {
pmac_call_feature(PMAC_FTR_1394_ENABLE, nd, 0, 0);
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
}
}
of_node_put(nd);
for_each_node_by_name(nd, "ethernet") {
if (nd->parent && of_device_is_compatible(nd, "gmac")
&& of_device_is_compatible(nd->parent, "uni-north"))
pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
}
of_node_put(nd);
}
void pmac_pci_fixup_cardbus(struct pci_dev* dev)
{
if (!machine_is(powermac))
return;
/*
* Fix the interrupt routing on the various cardbus bridges
* used on powerbooks
*/
if (dev->vendor != PCI_VENDOR_ID_TI)
return;
if (dev->device == PCI_DEVICE_ID_TI_1130 ||
dev->device == PCI_DEVICE_ID_TI_1131) {
u8 val;
/* Enable PCI interrupt */
if (pci_read_config_byte(dev, 0x91, &val) == 0)
pci_write_config_byte(dev, 0x91, val | 0x30);
/* Disable ISA interrupt mode */
if (pci_read_config_byte(dev, 0x92, &val) == 0)
pci_write_config_byte(dev, 0x92, val & ~0x06);
}
if (dev->device == PCI_DEVICE_ID_TI_1210 ||
dev->device == PCI_DEVICE_ID_TI_1211 ||
dev->device == PCI_DEVICE_ID_TI_1410 ||
dev->device == PCI_DEVICE_ID_TI_1510) {
u8 val;
/* 0x8c == TI122X_IRQMUX, 2 says to route the INTA
signal out the MFUNC0 pin */
if (pci_read_config_byte(dev, 0x8c, &val) == 0)
pci_write_config_byte(dev, 0x8c, (val & ~0x0f) | 2);
/* Disable ISA interrupt mode */
if (pci_read_config_byte(dev, 0x92, &val) == 0)
pci_write_config_byte(dev, 0x92, val & ~0x06);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_ANY_ID, pmac_pci_fixup_cardbus);
void pmac_pci_fixup_pciata(struct pci_dev* dev)
{
u8 progif = 0;
/*
* On PowerMacs, we try to switch any PCI ATA controller to
* fully native mode
*/
if (!machine_is(powermac))
return;
/* Some controllers don't have the class IDE */
if (dev->vendor == PCI_VENDOR_ID_PROMISE)
switch(dev->device) {
case PCI_DEVICE_ID_PROMISE_20246:
case PCI_DEVICE_ID_PROMISE_20262:
case PCI_DEVICE_ID_PROMISE_20263:
case PCI_DEVICE_ID_PROMISE_20265:
case PCI_DEVICE_ID_PROMISE_20267:
case PCI_DEVICE_ID_PROMISE_20268:
case PCI_DEVICE_ID_PROMISE_20269:
case PCI_DEVICE_ID_PROMISE_20270:
case PCI_DEVICE_ID_PROMISE_20271:
case PCI_DEVICE_ID_PROMISE_20275:
case PCI_DEVICE_ID_PROMISE_20276:
case PCI_DEVICE_ID_PROMISE_20277:
goto good;
}
/* Others, check PCI class */
if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
return;
good:
pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
if ((progif & 5) != 5) {
printk(KERN_INFO "PCI: %s Forcing PCI IDE into native mode\n",
pci_name(dev));
(void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
(progif & 5) != 5)
printk(KERN_ERR "Rewrite of PROGIF failed !\n");
else {
/* Clear IO BARs, they will be reassigned */
pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0);
pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0);
pci_write_config_dword(dev, PCI_BASE_ADDRESS_2, 0);
pci_write_config_dword(dev, PCI_BASE_ADDRESS_3, 0);
}
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
#endif /* CONFIG_PPC32 */
/*
* Disable second function on K2-SATA, it's broken
* and disable IO BARs on first one
*/
static void fixup_k2_sata(struct pci_dev* dev)
{
int i;
u16 cmd;
if (PCI_FUNC(dev->devfn) > 0) {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
pci_write_config_word(dev, PCI_COMMAND, cmd);
for (i = 0; i < 6; i++) {
dev->resource[i].start = dev->resource[i].end = 0;
dev->resource[i].flags = 0;
pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i,
0);
}
} else {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd &= ~PCI_COMMAND_IO;
pci_write_config_word(dev, PCI_COMMAND, cmd);
for (i = 0; i < 5; i++) {
dev->resource[i].start = dev->resource[i].end = 0;
dev->resource[i].flags = 0;
pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i,
0);
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
/*
* On U4 (aka CPC945) the PCIe root complex "P2P" bridge resource ranges aren't
* configured by the firmware. The bridge itself seems to ignore them but it
* causes problems with Linux which then re-assigns devices below the bridge,
* thus changing addresses of those devices from what was in the device-tree,
* which sucks when those are video cards using offb
*
* We could just mark it transparent but I prefer fixing up the resources to
* properly show what's going on here, as I have some doubts about having them
* badly configured potentially being an issue for DMA.
*
* We leave PIO alone, it seems to be fine
*
* Oh and there's another funny bug. The OF properties advertise the region
* 0xf1000000..0xf1ffffff as being forwarded as memory space. But that's
* actually not true, this region is the memory mapped config space. So we
* also need to filter it out or we'll map things in the wrong place.
*/
static void fixup_u4_pcie(struct pci_dev* dev)
{
struct pci_controller *host = pci_bus_to_host(dev->bus);
struct resource *region = NULL;
u32 reg;
int i;
/* Only do that on PowerMac */
if (!machine_is(powermac))
return;
/* Find the largest MMIO region */
for (i = 0; i < 3; i++) {
struct resource *r = &host->mem_resources[i];
if (!(r->flags & IORESOURCE_MEM))
continue;
/* Skip the 0xf0xxxxxx..f2xxxxxx regions, we know they
* are reserved by HW for other things
*/
if (r->start >= 0xf0000000 && r->start < 0xf3000000)
continue;
if (!region || (r->end - r->start) >
(region->end - region->start))
region = r;
}
/* Nothing found, bail */
if (region == NULL)
return;
/* Print things out */
printk(KERN_INFO "PCI: Fixup U4 PCIe bridge range: %pR\n", region);
/* Fixup bridge config space. We know it's a Mac, resources aren't
* offset so let's just blast them as-is. We also know that they
* fit in 32 bits
*/
reg = ((region->start >> 16) & 0xfff0) | (region->end & 0xfff00000);
pci_write_config_dword(dev, PCI_MEMORY_BASE, reg);
pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0);
pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0);
pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_U4_PCIE, fixup_u4_pcie);
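/*
* Worked example (illustrative only, the region values are assumed, not
* taken from real firmware): suppose the largest MMIO window found above
* is 0x80000000..0x8fffffff. Then
*
*	(region->start >> 16) & 0xfff0     = 0x8000
*	 region->end          & 0xfff00000 = 0x8ff00000
*	reg                                = 0x8ff08000
*
* The single dword write at PCI_MEMORY_BASE programs the bridge memory
* base register (low 16 bits, 0x8000 -> base 0x80000000) and the memory
* limit register (high 16 bits, 0x8ff0 -> limit 0x8fffffff) in one go,
* since bits [15:4] of each half hold address bits [31:20] of the window.
*/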
| gpl-2.0 |
silvesterlee/linux | drivers/dma/ioat/sysfs.c | 725 | 3886 | /*
* Intel I/OAT DMA Linux driver
* Copyright(c) 2004 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
#include "../dmaengine.h"
static ssize_t cap_show(struct dma_chan *c, char *page)
{
struct dma_device *dma = c->device;
return sprintf(page, "copy%s%s%s%s%s\n",
dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
static ssize_t version_show(struct dma_chan *c, char *page)
{
struct dma_device *dma = c->device;
struct ioatdma_device *ioat_dma = to_ioatdma_device(dma);
return sprintf(page, "%d.%d\n",
ioat_dma->version >> 4, ioat_dma->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct ioat_sysfs_entry *entry;
struct ioatdma_chan *ioat_chan;
entry = container_of(attr, struct ioat_sysfs_entry, attr);
ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);
if (!entry->show)
return -EIO;
return entry->show(&ioat_chan->dma_chan, page);
}
const struct sysfs_ops ioat_sysfs_ops = {
.show = ioat_attr_show,
};
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
{
struct dma_device *dma = &ioat_dma->dma_dev;
struct dma_chan *c;
list_for_each_entry(c, &dma->channels, device_node) {
struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
struct kobject *parent = &c->dev->device.kobj;
int err;
err = kobject_init_and_add(&ioat_chan->kobj, type,
parent, "quickdata");
if (err) {
dev_warn(to_dev(ioat_chan),
"sysfs init error (%d), continuing...\n", err);
kobject_put(&ioat_chan->kobj);
set_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state);
}
}
}
void ioat_kobject_del(struct ioatdma_device *ioat_dma)
{
struct dma_device *dma = &ioat_dma->dma_dev;
struct dma_chan *c;
list_for_each_entry(c, &dma->channels, device_node) {
struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
if (!test_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state)) {
kobject_del(&ioat_chan->kobj);
kobject_put(&ioat_chan->kobj);
}
}
}
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
/* ...taken outside the lock, no need to be precise */
return sprintf(page, "%d\n", ioat_ring_active(ioat_chan));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
static struct attribute *ioat_attrs[] = {
&ring_size_attr.attr,
&ring_active_attr.attr,
&ioat_cap_attr.attr,
&ioat_version_attr.attr,
NULL,
};
struct kobj_type ioat_ktype = {
.sysfs_ops = &ioat_sysfs_ops,
.default_attrs = ioat_attrs,
};
| gpl-2.0 |
hsavit1/linux | drivers/scsi/scsi_trace.c | 981 | 6760 | /*
* Copyright (C) 2010 FUJITSU LIMITED
* Copyright (C) 2010 Tomohiro Kusumi <kusumi.tomohiro@jp.fujitsu.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/trace_seq.h>
#include <trace/events/scsi.h>
#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
static const char *
scsi_trace_misc(struct trace_seq *, unsigned char *, int);
static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
sector_t lba = 0, txlen = 0;
lba |= ((cdb[1] & 0x1F) << 16);
lba |= (cdb[2] << 8);
lba |= cdb[3];
txlen = cdb[4];
trace_seq_printf(p, "lba=%llu txlen=%llu",
(unsigned long long)lba, (unsigned long long)txlen);
trace_seq_putc(p, 0);
return ret;
}
static const char *
scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
sector_t lba = 0, txlen = 0;
lba |= (cdb[2] << 24);
lba |= (cdb[3] << 16);
lba |= (cdb[4] << 8);
lba |= cdb[5];
txlen |= (cdb[7] << 8);
txlen |= cdb[8];
trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
(unsigned long long)lba, (unsigned long long)txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME)
trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
trace_seq_putc(p, 0);
return ret;
}
static const char *
scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
sector_t lba = 0, txlen = 0;
lba |= (cdb[2] << 24);
lba |= (cdb[3] << 16);
lba |= (cdb[4] << 8);
lba |= cdb[5];
txlen |= (cdb[6] << 24);
txlen |= (cdb[7] << 16);
txlen |= (cdb[8] << 8);
txlen |= cdb[9];
trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
(unsigned long long)lba, (unsigned long long)txlen,
cdb[1] >> 5);
trace_seq_putc(p, 0);
return ret;
}
static const char *
scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
sector_t lba = 0, txlen = 0;
lba |= ((u64)cdb[2] << 56);
lba |= ((u64)cdb[3] << 48);
lba |= ((u64)cdb[4] << 40);
lba |= ((u64)cdb[5] << 32);
lba |= (cdb[6] << 24);
lba |= (cdb[7] << 16);
lba |= (cdb[8] << 8);
lba |= cdb[9];
txlen |= (cdb[10] << 24);
txlen |= (cdb[11] << 16);
txlen |= (cdb[12] << 8);
txlen |= cdb[13];
trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
(unsigned long long)lba, (unsigned long long)txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME_16)
trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
trace_seq_putc(p, 0);
return ret;
}
static const char *
scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
sector_t lba = 0, txlen = 0;
u32 ei_lbrt = 0;
switch (SERVICE_ACTION32(cdb)) {
case READ_32:
cmd = "READ";
break;
case VERIFY_32:
cmd = "VERIFY";
break;
case WRITE_32:
cmd = "WRITE";
break;
case WRITE_SAME_32:
cmd = "WRITE_SAME";
break;
default:
trace_seq_puts(p, "UNKNOWN");
goto out;
}
lba |= ((u64)cdb[12] << 56);
lba |= ((u64)cdb[13] << 48);
lba |= ((u64)cdb[14] << 40);
lba |= ((u64)cdb[15] << 32);
lba |= (cdb[16] << 24);
lba |= (cdb[17] << 16);
lba |= (cdb[18] << 8);
lba |= cdb[19];
ei_lbrt |= (cdb[20] << 24);
ei_lbrt |= (cdb[21] << 16);
ei_lbrt |= (cdb[22] << 8);
ei_lbrt |= cdb[23];
txlen |= (cdb[28] << 24);
txlen |= (cdb[29] << 16);
txlen |= (cdb[30] << 8);
txlen |= cdb[31];
trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
cmd, (unsigned long long)lba,
(unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
out:
trace_seq_putc(p, 0);
return ret;
}
static const char *
scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
unsigned int regions = cdb[7] << 8 | cdb[8];
trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
trace_seq_putc(p, 0);
return ret;
}
static const char *
scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
sector_t lba = 0;
u32 alloc_len = 0;
switch (SERVICE_ACTION16(cdb)) {
case SAI_READ_CAPACITY_16:
cmd = "READ_CAPACITY_16";
break;
case SAI_GET_LBA_STATUS:
cmd = "GET_LBA_STATUS";
break;
default:
trace_seq_puts(p, "UNKNOWN");
goto out;
}
lba |= ((u64)cdb[2] << 56);
lba |= ((u64)cdb[3] << 48);
lba |= ((u64)cdb[4] << 40);
lba |= ((u64)cdb[5] << 32);
lba |= (cdb[6] << 24);
lba |= (cdb[7] << 16);
lba |= (cdb[8] << 8);
lba |= cdb[9];
alloc_len |= (cdb[10] << 24);
alloc_len |= (cdb[11] << 16);
alloc_len |= (cdb[12] << 8);
alloc_len |= cdb[13];
trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
(unsigned long long)lba, alloc_len);
out:
trace_seq_putc(p, 0);
return ret;
}
static const char *
scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
{
switch (SERVICE_ACTION32(cdb)) {
case READ_32:
case VERIFY_32:
case WRITE_32:
case WRITE_SAME_32:
return scsi_trace_rw32(p, cdb, len);
default:
return scsi_trace_misc(p, cdb, len);
}
}
static const char *
scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
trace_seq_putc(p, '-');
trace_seq_putc(p, 0);
return ret;
}
const char *
scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
{
switch (cdb[0]) {
case READ_6:
case WRITE_6:
return scsi_trace_rw6(p, cdb, len);
case READ_10:
case VERIFY:
case WRITE_10:
case WRITE_SAME:
return scsi_trace_rw10(p, cdb, len);
case READ_12:
case VERIFY_12:
case WRITE_12:
return scsi_trace_rw12(p, cdb, len);
case READ_16:
case VERIFY_16:
case WRITE_16:
case WRITE_SAME_16:
return scsi_trace_rw16(p, cdb, len);
case UNMAP:
return scsi_trace_unmap(p, cdb, len);
case SERVICE_ACTION_IN_16:
return scsi_trace_service_action_in(p, cdb, len);
case VARIABLE_LENGTH_CMD:
return scsi_trace_varlen(p, cdb, len);
default:
return scsi_trace_misc(p, cdb, len);
}
}
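/*
* Worked example (illustrative only, CDB bytes assumed): a READ_10 CDB of
*
*	28 00 00 00 10 00 00 00 08 00
*
* is routed to scsi_trace_rw10(), which decodes lba = 0x1000 = 4096 from
* bytes 2-5, txlen = 8 from bytes 7-8 and protect = cdb[1] >> 5 = 0,
* producing the trace string "lba=4096 txlen=8 protect=0".
*/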
| gpl-2.0 |
rodrigues-daniel/linux | arch/powerpc/platforms/cell/spu_callbacks.c | 1237 | 2177 | /*
* System call callback functions for SPUs
*/
#undef DEBUG
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <asm/spu.h>
#include <asm/syscalls.h>
#include <asm/unistd.h>
/*
* This table defines the system calls that an SPU can call.
* It is currently a subset of the 64 bit powerpc system calls,
* with the exact semantics.
*
* The reasons for disabling some of the system calls are:
* 1. They interact with the way SPU syscalls are handled
* and we can't let them execute ever:
* restart_syscall, exit, fork, execve, ptrace, ...
* 2. They are deprecated and replaced by other means:
* uselib, pciconfig_*, sysfs, ...
* 3. They are somewhat interacting with the system in a way
* we don't want an SPU to:
* reboot, init_module, mount, kexec_load
* 4. They are optional and we can't rely on them being
* linked into the kernel. Unfortunately, the cond_syscall
* helper does not work here as it does not add the necessary
* opd symbols:
* mbind, mq_open, ipc, ...
*/
static void *spu_syscall_table[] = {
#define SYSCALL(func) sys_ni_syscall,
#define COMPAT_SYS(func) sys_ni_syscall,
#define PPC_SYS(func) sys_ni_syscall,
#define OLDSYS(func) sys_ni_syscall,
#define SYS32ONLY(func) sys_ni_syscall,
#define PPC64ONLY(func) sys_ni_syscall,
#define SYSX(f, f3264, f32) sys_ni_syscall,
#define SYSCALL_SPU(func) sys_##func,
#define COMPAT_SYS_SPU(func) sys_##func,
#define PPC_SYS_SPU(func) ppc_##func,
#define SYSX_SPU(f, f3264, f32) f,
#include <asm/systbl.h>
};
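/*
* Illustrative note (not part of the original file): the table above is
* filled purely by macro expansion of <asm/systbl.h>. Assuming systbl.h
* carries entries such as
*
*	SYSCALL_SPU(getpid)	-> expands here to sys_getpid,
*	PPC_SYS(fork)		-> expands here to sys_ni_syscall,
*
* an SPU may call getpid, while a fork attempt lands in sys_ni_syscall
* and spu_sys_callback() below simply returns its -ENOSYS.
*/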
long spu_sys_callback(struct spu_syscall_block *s)
{
long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6);
if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) {
pr_debug("%s: invalid syscall #%lld", __func__, s->nr_ret);
return -ENOSYS;
}
syscall = spu_syscall_table[s->nr_ret];
pr_debug("SPU-syscall "
"%pSR:syscall%lld(%llx, %llx, %llx, %llx, %llx, %llx)\n",
syscall,
s->nr_ret,
s->parm[0], s->parm[1], s->parm[2],
s->parm[3], s->parm[4], s->parm[5]);
return syscall(s->parm[0], s->parm[1], s->parm[2],
s->parm[3], s->parm[4], s->parm[5]);
}
EXPORT_SYMBOL_GPL(spu_sys_callback);
| gpl-2.0 |
u-blox/linux | arch/powerpc/sysdev/qe_lib/ucc_fast.c | 1749 | 11067 | /*
* Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
*
* Description:
* QE UCC Fast API Set - UCC Fast specific routines implementations.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/export.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>
void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
{
printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
&uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
&uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
&uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
&uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
}
EXPORT_SYMBOL(ucc_fast_dump_regs);
u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
{
switch (uccf_num) {
case 0: return QE_CR_SUBBLOCK_UCCFAST1;
case 1: return QE_CR_SUBBLOCK_UCCFAST2;
case 2: return QE_CR_SUBBLOCK_UCCFAST3;
case 3: return QE_CR_SUBBLOCK_UCCFAST4;
case 4: return QE_CR_SUBBLOCK_UCCFAST5;
case 5: return QE_CR_SUBBLOCK_UCCFAST6;
case 6: return QE_CR_SUBBLOCK_UCCFAST7;
case 7: return QE_CR_SUBBLOCK_UCCFAST8;
default: return QE_CR_SUBBLOCK_INVALID;
}
}
EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
{
out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
}
EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
{
struct ucc_fast __iomem *uf_regs;
u32 gumr;
uf_regs = uccf->uf_regs;
/* Enable reception and/or transmission on this UCC. */
gumr = in_be32(&uf_regs->gumr);
if (mode & COMM_DIR_TX) {
gumr |= UCC_FAST_GUMR_ENT;
uccf->enabled_tx = 1;
}
if (mode & COMM_DIR_RX) {
gumr |= UCC_FAST_GUMR_ENR;
uccf->enabled_rx = 1;
}
out_be32(&uf_regs->gumr, gumr);
}
EXPORT_SYMBOL(ucc_fast_enable);
void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
{
struct ucc_fast __iomem *uf_regs;
u32 gumr;
uf_regs = uccf->uf_regs;
/* Disable reception and/or transmission on this UCC. */
gumr = in_be32(&uf_regs->gumr);
if (mode & COMM_DIR_TX) {
gumr &= ~UCC_FAST_GUMR_ENT;
uccf->enabled_tx = 0;
}
if (mode & COMM_DIR_RX) {
gumr &= ~UCC_FAST_GUMR_ENR;
uccf->enabled_rx = 0;
}
out_be32(&uf_regs->gumr, gumr);
}
EXPORT_SYMBOL(ucc_fast_disable);
int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
{
struct ucc_fast_private *uccf;
struct ucc_fast __iomem *uf_regs;
u32 gumr;
int ret;
if (!uf_info)
return -EINVAL;
/* check if the UCC port number is in range. */
if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
printk(KERN_ERR "%s: illegal UCC number\n", __func__);
return -EINVAL;
}
/* Check that 'max_rx_buf_length' is properly aligned (4). */
if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
__func__);
return -EINVAL;
}
/* Validate Virtual Fifo register values */
if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
printk(KERN_ERR "%s: urfs is too small\n", __func__);
return -EINVAL;
}
if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
return -EINVAL;
}
if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
return -EINVAL;
}
if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
return -EINVAL;
}
if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
return -EINVAL;
}
if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
return -EINVAL;
}
if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
return -EINVAL;
}
uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
if (!uccf) {
printk(KERN_ERR "%s: Cannot allocate private data\n",
__func__);
return -ENOMEM;
}
/* Fill fast UCC structure */
uccf->uf_info = uf_info;
/* Map the UCC Fast register space */
uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
if (uccf->uf_regs == NULL) {
printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
kfree(uccf);
return -ENOMEM;
}
uccf->enabled_tx = 0;
uccf->enabled_rx = 0;
uccf->stopped_tx = 0;
uccf->stopped_rx = 0;
uf_regs = uccf->uf_regs;
uccf->p_ucce = &uf_regs->ucce;
uccf->p_uccm = &uf_regs->uccm;
#ifdef CONFIG_UGETH_TX_ON_DEMAND
uccf->p_utodr = &uf_regs->utodr;
#endif
#ifdef STATISTICS
uccf->tx_frames = 0;
uccf->rx_frames = 0;
uccf->rx_discarded = 0;
#endif /* STATISTICS */
/* Set UCC to fast type */
ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
if (ret) {
printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
ucc_fast_free(uccf);
return ret;
}
uccf->mrblr = uf_info->max_rx_buf_length;
/* Set GUMR */
/* For more details see the hardware spec. */
gumr = uf_info->ttx_trx;
if (uf_info->tci)
gumr |= UCC_FAST_GUMR_TCI;
if (uf_info->cdp)
gumr |= UCC_FAST_GUMR_CDP;
if (uf_info->ctsp)
gumr |= UCC_FAST_GUMR_CTSP;
if (uf_info->cds)
gumr |= UCC_FAST_GUMR_CDS;
if (uf_info->ctss)
gumr |= UCC_FAST_GUMR_CTSS;
if (uf_info->txsy)
gumr |= UCC_FAST_GUMR_TXSY;
if (uf_info->rsyn)
gumr |= UCC_FAST_GUMR_RSYN;
gumr |= uf_info->synl;
if (uf_info->rtsm)
gumr |= UCC_FAST_GUMR_RTSM;
gumr |= uf_info->renc;
if (uf_info->revd)
gumr |= UCC_FAST_GUMR_REVD;
gumr |= uf_info->tenc;
gumr |= uf_info->tcrc;
gumr |= uf_info->mode;
out_be32(&uf_regs->gumr, gumr);
/* Allocate memory for Tx Virtual Fifo */
uccf->ucc_fast_tx_virtual_fifo_base_offset =
qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
__func__);
uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
ucc_fast_free(uccf);
return -ENOMEM;
}
/* Allocate memory for Rx Virtual Fifo */
uccf->ucc_fast_rx_virtual_fifo_base_offset =
qe_muram_alloc(uf_info->urfs +
UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
__func__);
uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
ucc_fast_free(uccf);
return -ENOMEM;
}
/* Set Virtual Fifo registers */
out_be16(&uf_regs->urfs, uf_info->urfs);
out_be16(&uf_regs->urfet, uf_info->urfet);
out_be16(&uf_regs->urfset, uf_info->urfset);
out_be16(&uf_regs->utfs, uf_info->utfs);
out_be16(&uf_regs->utfet, uf_info->utfet);
out_be16(&uf_regs->utftt, uf_info->utftt);
/* utfb, urfb are offsets from MURAM base */
out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
/* Mux clocking */
/* Grant Support */
ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
/* Breakpoint Support */
ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
/* Set Tsa or NMSI mode. */
ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
/* If NMSI (not Tsa), set Tx and Rx clock. */
if (!uf_info->tsa) {
/* Rx clock routing */
if ((uf_info->rx_clock != QE_CLK_NONE) &&
ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
COMM_DIR_RX)) {
printk(KERN_ERR "%s: illegal value for RX clock\n",
__func__);
ucc_fast_free(uccf);
return -EINVAL;
}
/* Tx clock routing */
if ((uf_info->tx_clock != QE_CLK_NONE) &&
ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
COMM_DIR_TX)) {
printk(KERN_ERR "%s: illegal value for TX clock\n",
__func__);
ucc_fast_free(uccf);
return -EINVAL;
}
}
/* Set interrupt mask register at UCC level. */
out_be32(&uf_regs->uccm, uf_info->uccm_mask);
/* First, clear anything pending at UCC level,
* otherwise, old garbage may come through
* as soon as the dam is opened. */
/* Writing '1' clears */
out_be32(&uf_regs->ucce, 0xffffffff);
*uccf_ret = uccf;
return 0;
}
EXPORT_SYMBOL(ucc_fast_init);
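/*
* Minimal usage sketch (illustrative only; the function name and the
* buffer length are placeholders, not taken from the original driver).
* A caller fills a struct ucc_fast_info, lets ucc_fast_init() map the
* registers, program GUMR and carve the virtual FIFOs out of MURAM, and
* then enables whichever directions it needs.
*/
#if 0	/* example sketch, intentionally not compiled */
static int example_ucc_fast_bring_up(struct ucc_fast_info *uf_info)
{
	struct ucc_fast_private *uccf;
	int ret;

	uf_info->max_rx_buf_length = 1536;	/* must be 4-byte aligned */
	ret = ucc_fast_init(uf_info, &uccf);
	if (ret)
		return ret;

	ucc_fast_enable(uccf, COMM_DIR_RX | COMM_DIR_TX);
	/* ... run traffic ... */
	ucc_fast_disable(uccf, COMM_DIR_RX | COMM_DIR_TX);
	ucc_fast_free(uccf);
	return 0;
}
#endif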
void ucc_fast_free(struct ucc_fast_private * uccf)
{
if (!uccf)
return;
if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
if (uccf->uf_regs)
iounmap(uccf->uf_regs);
kfree(uccf);
}
EXPORT_SYMBOL(ucc_fast_free);
| gpl-2.0 |
Jackeagle/android_kernel_huawei_msm8916 | arch/x86/kernel/cpu/perf_event_p6.c | 2261 | 6446 | #include <linux/perf_event.h>
#include <linux/types.h>
#include "perf_event.h"
/*
* Not sure about some of these
*/
static const u64 p6_perfmon_event_map[] =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x0079, /* CPU_CLK_UNHALTED */
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, /* INST_RETIRED */
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e, /* L2_RQSTS:M:E:S:I */
[PERF_COUNT_HW_CACHE_MISSES] = 0x012e, /* L2_RQSTS:I */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, /* BR_INST_RETIRED */
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, /* BR_MISS_PRED_RETIRED */
[PERF_COUNT_HW_BUS_CYCLES] = 0x0062, /* BUS_DRDY_CLOCKS */
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a2, /* RESOURCE_STALLS */
};
static const u64 __initconst p6_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0043, /* DATA_MEM_REFS */
[ C(RESULT_MISS) ] = 0x0045, /* DCU_LINES_IN */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0x0f29, /* L2_LD:M:E:S:I */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(L1I ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0080, /* IFU_IFETCH */
[ C(RESULT_MISS) ] = 0x0f28, /* L2_IFETCH:M:E:S:I */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0x0025, /* L2_M_LINES_INM */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0043, /* DATA_MEM_REFS */
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0080, /* IFU_IFETCH */
[ C(RESULT_MISS) ] = 0x0085, /* ITLB_MISS */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED */
[ C(RESULT_MISS) ] = 0x00c5, /* BR_MISS_PRED_RETIRED */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
static u64 p6_pmu_event_map(int hw_event)
{
return p6_perfmon_event_map[hw_event];
}
/*
* Event setting that is specified not to count anything.
* We use this to effectively disable a counter.
*
* L2_RQSTS with 0 MESI unit mask.
*/
#define P6_NOP_EVENT 0x0000002EULL
static struct event_constraint p6_event_constraints[] =
{
INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
EVENT_CONSTRAINT_END
};
static void p6_pmu_disable_all(void)
{
u64 val;
/* p6 only has one enable register */
rdmsrl(MSR_P6_EVNTSEL0, val);
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(MSR_P6_EVNTSEL0, val);
}
static void p6_pmu_enable_all(int added)
{
unsigned long val;
/* p6 only has one enable register */
rdmsrl(MSR_P6_EVNTSEL0, val);
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(MSR_P6_EVNTSEL0, val);
}
static inline void
p6_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 val = P6_NOP_EVENT;
(void)wrmsrl_safe(hwc->config_base, val);
}
static void p6_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 val;
val = hwc->config;
/*
* p6 only has a global event enable, set on PerfEvtSel0
* We "disable" events by programming P6_NOP_EVENT
* and we rely on p6_pmu_enable_all() being called
* to actually enable the events.
*/
(void)wrmsrl_safe(hwc->config_base, val);
}
PMU_FORMAT_ATTR(event, "config:0-7" );
PMU_FORMAT_ATTR(umask, "config:8-15" );
PMU_FORMAT_ATTR(edge, "config:18" );
PMU_FORMAT_ATTR(pc, "config:19" );
PMU_FORMAT_ATTR(inv, "config:23" );
PMU_FORMAT_ATTR(cmask, "config:24-31" );
static struct attribute *intel_p6_formats_attr[] = {
&format_attr_event.attr,
&format_attr_umask.attr,
&format_attr_edge.attr,
&format_attr_pc.attr,
&format_attr_inv.attr,
&format_attr_cmask.attr,
NULL,
};
static __initconst const struct x86_pmu p6_pmu = {
.name = "p6",
.handle_irq = x86_pmu_handle_irq,
.disable_all = p6_pmu_disable_all,
.enable_all = p6_pmu_enable_all,
.enable = p6_pmu_enable_event,
.disable = p6_pmu_disable_event,
.hw_config = x86_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_P6_EVNTSEL0,
.perfctr = MSR_P6_PERFCTR0,
.event_map = p6_pmu_event_map,
.max_events = ARRAY_SIZE(p6_perfmon_event_map),
.apic = 1,
.max_period = (1ULL << 31) - 1,
.version = 0,
.num_counters = 2,
/*
* Events have 40 bits implemented. However they are designed such
* that bits [32-39] are sign extensions of bit 31. As such the
* effective width of a event for P6-like PMU is 32 bits only.
*
* See IA-32 Intel Architecture Software developer manual Vol 3B
*/
.cntval_bits = 32,
.cntval_mask = (1ULL << 32) - 1,
.get_event_constraints = x86_get_event_constraints,
.event_constraints = p6_event_constraints,
.format_attrs = intel_p6_formats_attr,
.events_sysfs_show = intel_event_sysfs_show,
};
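/*
* Worked example (illustrative only) of why 32 usable bits are enough
* here: counter deltas are well defined modulo 2^32, so a counter that
* wraps from 0xfffffff0 to 0x00000010 between two reads still yields
*
*	(0x00000010 - 0xfffffff0) & cntval_mask = 0x20
*
* and max_period above is capped at 2^31 - 1, so a single sampling
* period always fits comfortably within that range.
*/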
__init int p6_pmu_init(void)
{
switch (boot_cpu_data.x86_model) {
case 1:
case 3: /* Pentium Pro */
case 5:
case 6: /* Pentium II */
case 7:
case 8:
case 11: /* Pentium III */
case 9:
case 13:
/* Pentium M */
break;
default:
pr_cont("unsupported p6 CPU model %d ",
boot_cpu_data.x86_model);
return -ENODEV;
}
x86_pmu = p6_pmu;
memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
return 0;
}
| gpl-2.0 |
dlumberg/kernel_asus_tf101 | drivers/hwmon/lm70.c | 3029 | 6042 | /*
* lm70.c
*
* The LM70 is a temperature sensor chip from National Semiconductor (NS).
* Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
*
* The LM70 communicates with a host processor via an SPI/Microwire Bus
* interface. The complete datasheet is available at National's website
* here:
* http://www.national.com/pf/LM/LM70.html
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/hwmon.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#define DRVNAME "lm70"
#define LM70_CHIP_LM70 0 /* original NS LM70 */
#define LM70_CHIP_TMP121 1 /* TI TMP121/TMP123 */
struct lm70 {
struct device *hwmon_dev;
struct mutex lock;
unsigned int chip;
};
/* sysfs hook function */
static ssize_t lm70_sense_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct spi_device *spi = to_spi_device(dev);
int status, val = 0;
u8 rxbuf[2];
s16 raw=0;
struct lm70 *p_lm70 = spi_get_drvdata(spi);
if (mutex_lock_interruptible(&p_lm70->lock))
return -ERESTARTSYS;
/*
* spi_read() requires a DMA-safe buffer; so we use
* spi_write_then_read(), transmitting 0 bytes.
*/
status = spi_write_then_read(spi, NULL, 0, &rxbuf[0], 2);
if (status < 0) {
pr_warn("spi_write_then_read failed with status %d\n", status);
goto out;
}
raw = (rxbuf[0] << 8) + rxbuf[1];
dev_dbg(dev, "rxbuf[0] : 0x%02x rxbuf[1] : 0x%02x raw=0x%04x\n",
rxbuf[0], rxbuf[1], raw);
/*
* LM70:
* The "raw" temperature read into rxbuf[] is a 16-bit signed 2's
* complement value. Only the MSB 11 bits (1 sign + 10 temperature
* bits) are meaningful; the LSB 5 bits are to be discarded.
* See the datasheet.
*
* Further, each bit represents 0.25 degrees Celsius; so, multiply
* by 0.25. Also multiply by 1000 to represent in millidegrees
* Celsius.
* So it's equivalent to multiplying by 0.25 * 1000 = 250.
*
* TMP121/TMP123:
* 13 bits of 2's complement data, discard LSB 3 bits,
* resolution 0.0625 degrees celsius.
*/
switch (p_lm70->chip) {
case LM70_CHIP_LM70:
val = ((int)raw / 32) * 250;
break;
case LM70_CHIP_TMP121:
val = ((int)raw / 8) * 625 / 10;
break;
}
status = sprintf(buf, "%d\n", val); /* millidegrees Celsius */
out:
mutex_unlock(&p_lm70->lock);
return status;
}
static DEVICE_ATTR(temp1_input, S_IRUGO, lm70_sense_temp, NULL);
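/*
* Worked example (illustrative only, raw values assumed): with the LM70
* arithmetic above, raw = 0x0C80 (= 3200) gives
*
*	3200 / 32 = 100 counts of 0.25 degC -> 100 * 250 = 25000 mdegC
*
* i.e. +25.0 degrees Celsius, while raw = 0xFF80 (-128 as s16) gives
* -128 / 32 = -4 -> -4 * 250 = -1000 mdegC, i.e. -1.0 degrees. For a
* TMP121 the same 0x0C80 is 3200 / 8 = 400 counts of 0.0625 degC, so
* 400 * 625 / 10 = 25000 mdegC as well.
*/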
static ssize_t lm70_show_name(struct device *dev, struct device_attribute
*devattr, char *buf)
{
struct lm70 *p_lm70 = dev_get_drvdata(dev);
int ret;
switch (p_lm70->chip) {
case LM70_CHIP_LM70:
ret = sprintf(buf, "lm70\n");
break;
case LM70_CHIP_TMP121:
ret = sprintf(buf, "tmp121\n");
break;
default:
ret = -EINVAL;
}
return ret;
}
static DEVICE_ATTR(name, S_IRUGO, lm70_show_name, NULL);
/*----------------------------------------------------------------------*/
static int __devinit lm70_probe(struct spi_device *spi)
{
int chip = spi_get_device_id(spi)->driver_data;
struct lm70 *p_lm70;
int status;
/* signaling is SPI_MODE_0 for both LM70 and TMP121 */
if (spi->mode & (SPI_CPOL | SPI_CPHA))
return -EINVAL;
/* 3-wire link (shared SI/SO) for LM70 */
if (chip == LM70_CHIP_LM70 && !(spi->mode & SPI_3WIRE))
return -EINVAL;
/* NOTE: we assume 8-bit words, and convert to 16 bits manually */
p_lm70 = kzalloc(sizeof *p_lm70, GFP_KERNEL);
if (!p_lm70)
return -ENOMEM;
mutex_init(&p_lm70->lock);
p_lm70->chip = chip;
/* sysfs hook */
p_lm70->hwmon_dev = hwmon_device_register(&spi->dev);
if (IS_ERR(p_lm70->hwmon_dev)) {
dev_dbg(&spi->dev, "hwmon_device_register failed.\n");
status = PTR_ERR(p_lm70->hwmon_dev);
goto out_dev_reg_failed;
}
spi_set_drvdata(spi, p_lm70);
if ((status = device_create_file(&spi->dev, &dev_attr_temp1_input))
|| (status = device_create_file(&spi->dev, &dev_attr_name))) {
dev_dbg(&spi->dev, "device_create_file failure.\n");
goto out_dev_create_file_failed;
}
return 0;
out_dev_create_file_failed:
device_remove_file(&spi->dev, &dev_attr_temp1_input);
hwmon_device_unregister(p_lm70->hwmon_dev);
out_dev_reg_failed:
spi_set_drvdata(spi, NULL);
kfree(p_lm70);
return status;
}
static int __devexit lm70_remove(struct spi_device *spi)
{
struct lm70 *p_lm70 = spi_get_drvdata(spi);
device_remove_file(&spi->dev, &dev_attr_temp1_input);
device_remove_file(&spi->dev, &dev_attr_name);
hwmon_device_unregister(p_lm70->hwmon_dev);
spi_set_drvdata(spi, NULL);
kfree(p_lm70);
return 0;
}
static const struct spi_device_id lm70_ids[] = {
{ "lm70", LM70_CHIP_LM70 },
{ "tmp121", LM70_CHIP_TMP121 },
{ },
};
MODULE_DEVICE_TABLE(spi, lm70_ids);
static struct spi_driver lm70_driver = {
.driver = {
.name = "lm70",
.owner = THIS_MODULE,
},
.id_table = lm70_ids,
.probe = lm70_probe,
.remove = __devexit_p(lm70_remove),
};
static int __init init_lm70(void)
{
return spi_register_driver(&lm70_driver);
}
static void __exit cleanup_lm70(void)
{
spi_unregister_driver(&lm70_driver);
}
module_init(init_lm70);
module_exit(cleanup_lm70);
MODULE_AUTHOR("Kaiwan N Billimoria");
MODULE_DESCRIPTION("NS LM70 / TI TMP121/TMP123 Linux driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ztemt/N939Sc_5.1_kenel | arch/xtensa/variants/s6000/dmac.c | 3285 | 4808 | /*
* Authors: Oskar Schirmer <oskar@scara.com>
* Daniel Gloeckner <dg@emlix.com>
* (c) 2008 emlix GmbH http://www.emlix.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <asm/cacheflush.h>
#include <variant/dmac.h>
/* DMA engine lookup */
struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB];
/* DMA control, per engine */
void s6dmac_put_fifo_cache(u32 dmac, int chan, u32 src, u32 dst, u32 size)
{
if (xtensa_need_flush_dma_source(src)) {
u32 base = src;
u32 span = size;
u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
if (chunk && (size > chunk)) {
s32 skip =
readl(DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP);
u32 gaps = (size+chunk-1)/chunk - 1;
if (skip >= 0) {
span += gaps * skip;
} else if (-skip > chunk) {
s32 decr = gaps * (chunk + skip);
base += decr;
span = chunk - decr;
} else {
span = max(span + gaps * skip,
(chunk + skip) * gaps - skip);
}
}
flush_dcache_unaligned(base, span);
}
if (xtensa_need_invalidate_dma_destination(dst)) {
u32 base = dst;
u32 span = size;
u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
if (chunk && (size > chunk)) {
s32 skip =
readl(DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP);
u32 gaps = (size+chunk-1)/chunk - 1;
if (skip >= 0) {
span += gaps * skip;
} else if (-skip > chunk) {
s32 decr = gaps * (chunk + skip);
base += decr;
span = chunk - decr;
} else {
span = max(span + gaps * skip,
(chunk + skip) * gaps - skip);
}
}
invalidate_dcache_unaligned(base, span);
}
s6dmac_put_fifo(dmac, chan, src, dst, size);
}
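/*
* Worked example (illustrative only, values assumed) for the positive
* skip case above: size = 4096, chunk = 1024, skip = 512 gives
*
*	gaps = (4096 + 1024 - 1) / 1024 - 1 = 3
*	span = 4096 + 3 * 512               = 5632
*
* so the flush/invalidate covers the three inter-chunk gaps as well as
* the payload itself.
*/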
void s6dmac_disable_error_irqs(u32 dmac, u32 mask)
{
unsigned long flags;
spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
spin_lock_irqsave(spinl, flags);
_s6dmac_disable_error_irqs(dmac, mask);
spin_unlock_irqrestore(spinl, flags);
}
u32 s6dmac_int_sources(u32 dmac, u32 channel)
{
u32 mask, ret, tmp;
mask = 1 << channel;
tmp = readl(dmac + S6_DMA_TERMCNTIRQSTAT);
tmp &= mask;
writel(tmp, dmac + S6_DMA_TERMCNTIRQCLR);
ret = tmp >> channel;
tmp = readl(dmac + S6_DMA_PENDCNTIRQSTAT);
tmp &= mask;
writel(tmp, dmac + S6_DMA_PENDCNTIRQCLR);
ret |= (tmp >> channel) << 1;
tmp = readl(dmac + S6_DMA_LOWWMRKIRQSTAT);
tmp &= mask;
writel(tmp, dmac + S6_DMA_LOWWMRKIRQCLR);
ret |= (tmp >> channel) << 2;
tmp = readl(dmac + S6_DMA_INTRAW0);
tmp &= (mask << S6_DMA_INT0_OVER) | (mask << S6_DMA_INT0_UNDER);
writel(tmp, dmac + S6_DMA_INTCLEAR0);
if (tmp & (mask << S6_DMA_INT0_UNDER))
ret |= 1 << 3;
if (tmp & (mask << S6_DMA_INT0_OVER))
ret |= 1 << 4;
tmp = readl(dmac + S6_DMA_MASTERERRINFO);
mask <<= S6_DMA_INT1_CHANNEL;
if (((tmp >> S6_DMA_MASTERERR_CHAN(0)) & S6_DMA_MASTERERR_CHAN_MASK)
== channel)
mask |= 1 << S6_DMA_INT1_MASTER;
if (((tmp >> S6_DMA_MASTERERR_CHAN(1)) & S6_DMA_MASTERERR_CHAN_MASK)
== channel)
mask |= 1 << (S6_DMA_INT1_MASTER + 1);
if (((tmp >> S6_DMA_MASTERERR_CHAN(2)) & S6_DMA_MASTERERR_CHAN_MASK)
== channel)
mask |= 1 << (S6_DMA_INT1_MASTER + 2);
tmp = readl(dmac + S6_DMA_INTRAW1) & mask;
writel(tmp, dmac + S6_DMA_INTCLEAR1);
ret |= ((tmp >> channel) & 1) << 5;
ret |= ((tmp >> S6_DMA_INT1_MASTER) & S6_DMA_INT1_MASTER_MASK) << 6;
return ret;
}
void s6dmac_release_chan(u32 dmac, int chan)
{
if (chan >= 0)
s6dmac_disable_chan(dmac, chan);
}
/* global init */
static inline void __init dmac_init(u32 dmac, u8 chan_nb)
{
s6dmac_ctrl[S6_DMAC_INDEX(dmac)].dmac = dmac;
spin_lock_init(&s6dmac_ctrl[S6_DMAC_INDEX(dmac)].lock);
s6dmac_ctrl[S6_DMAC_INDEX(dmac)].chan_nb = chan_nb;
writel(S6_DMA_INT1_MASTER_MASK << S6_DMA_INT1_MASTER,
dmac + S6_DMA_INTCLEAR1);
}
static inline void __init dmac_master(u32 dmac,
u32 m0start, u32 m0end, u32 m1start, u32 m1end)
{
writel(m0start, dmac + S6_DMA_MASTER0START);
writel(m0end - 1, dmac + S6_DMA_MASTER0END);
writel(m1start, dmac + S6_DMA_MASTER1START);
writel(m1end - 1, dmac + S6_DMA_MASTER1END);
}
static void __init s6_dmac_init(void)
{
dmac_init(S6_REG_LMSDMA, S6_LMSDMA_NB);
dmac_master(S6_REG_LMSDMA,
S6_MEM_DDR, S6_MEM_PCIE_APER, S6_MEM_EFI, S6_MEM_GMAC);
dmac_init(S6_REG_NIDMA, S6_NIDMA_NB);
dmac_init(S6_REG_DPDMA, S6_DPDMA_NB);
dmac_master(S6_REG_DPDMA,
S6_MEM_DDR, S6_MEM_PCIE_APER, S6_REG_DP, S6_REG_DPDMA);
dmac_init(S6_REG_HIFDMA, S6_HIFDMA_NB);
dmac_master(S6_REG_HIFDMA,
S6_MEM_GMAC, S6_MEM_PCIE_CFG, S6_MEM_PCIE_APER, S6_MEM_AUX);
}
arch_initcall(s6_dmac_init);
| gpl-2.0 |